dev_sectors      1678 drivers/ata/libata-scsi.c 	u64 dev_sectors = qc->dev->n_sectors;
dev_sectors      1706 drivers/ata/libata-scsi.c 	if (block >= dev_sectors)
dev_sectors      1708 drivers/ata/libata-scsi.c 	if ((block + n_block) > dev_sectors)
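The two libata-scsi.c checks above (lines 1706 and 1708) reject a command whose LBA range does not fit inside the device's usable capacity. A minimal user-space sketch of that bounds test; lba_range_ok() is a hypothetical stand-in, not a kernel function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the range test quoted above: a request of
 * n_block sectors starting at 'block' must lie entirely inside the
 * device's dev_sectors usable sectors. */
static bool lba_range_ok(uint64_t block, uint32_t n_block, uint64_t dev_sectors)
{
	if (block >= dev_sectors)		/* starts past the end */
		return false;
	if ((block + n_block) > dev_sectors)	/* runs off the end */
		return false;
	return true;
}

int main(void)
{
	const uint64_t dev_sectors = 1000;

	printf("%d\n", lba_range_ok(0, 1000, dev_sectors));	/* 1: exactly fits */
	printf("%d\n", lba_range_ok(999, 2, dev_sectors));	/* 0: overruns the end */
	printf("%d\n", lba_range_ok(1000, 1, dev_sectors));	/* 0: starts past the end */
	return 0;
}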
dev_sectors       434 drivers/md/dm-raid.c 	return rs->md.recovery_cp < rs->md.dev_sectors;
dev_sectors       689 drivers/md/dm-raid.c 			rdev->sectors = mddev->dev_sectors;
dev_sectors      1608 drivers/md/dm-raid.c 			if (ds < rs->md.dev_sectors) {
dev_sectors      1624 drivers/md/dm-raid.c 	sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
dev_sectors      1644 drivers/md/dm-raid.c 		dev_sectors *= rs->raid10_copies;
dev_sectors      1645 drivers/md/dm-raid.c 		if (sector_div(dev_sectors, data_stripes))
dev_sectors      1648 drivers/md/dm-raid.c 		array_sectors = (data_stripes + delta_disks) * dev_sectors;
dev_sectors      1652 drivers/md/dm-raid.c 	} else if (sector_div(dev_sectors, data_stripes))
dev_sectors      1657 drivers/md/dm-raid.c 		array_sectors = (data_stripes + delta_disks) * dev_sectors;
dev_sectors      1661 drivers/md/dm-raid.c 			rdev->sectors = dev_sectors;
dev_sectors      1664 drivers/md/dm-raid.c 	mddev->dev_sectors = dev_sectors;
dev_sectors      1673 drivers/md/dm-raid.c static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
dev_sectors      1684 drivers/md/dm-raid.c 		rs->md.recovery_cp = dev_sectors;
dev_sectors      1691 drivers/md/dm-raid.c 				     ? MaxSector : dev_sectors;
dev_sectors      1695 drivers/md/dm-raid.c static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
dev_sectors      1697 drivers/md/dm-raid.c 	if (!dev_sectors)
dev_sectors      1700 drivers/md/dm-raid.c 	else if (dev_sectors == MaxSector)
dev_sectors      1703 drivers/md/dm-raid.c 	else if (__rdev_sectors(rs) < dev_sectors)
dev_sectors      2684 drivers/md/dm-raid.c 	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
dev_sectors      2694 drivers/md/dm-raid.c 	if (rs->md.recovery_cp < rs->md.dev_sectors)
dev_sectors      2906 drivers/md/dm-raid.c 			rdev->sectors = mddev->dev_sectors;
dev_sectors      3074 drivers/md/dm-raid.c 	calculated_dev_sectors = rs->md.dev_sectors;
dev_sectors      3532 drivers/md/dm-raid.c 				      mddev->resync_max_sectors : mddev->dev_sectors;
dev_sectors      3732 drivers/md/dm-raid.c 				 rs->md.dev_sectors,
dev_sectors      3961 drivers/md/dm-raid.c 		r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
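The cluster of dm-raid.c hits around lines 1624-1664 derives the per-component-device size (dev_sectors) and the exported array size from the target length. Below is a simplified user-space model of that arithmetic for striped layouts only, with sector_div() replaced by plain 64-bit division (its non-zero remainder is what the kernel uses to reject lengths that do not divide evenly); split_sectors() is a hypothetical helper, not the kernel function:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Hypothetical model: ti->len sectors spread over data_stripes stripes,
 * scaled by the copy count for raid10, with an array size that accounts
 * for delta_disks devices being added or removed by a reshape. */
static int split_sectors(sector_t ti_len, unsigned int data_stripes,
			 unsigned int copies, int delta_disks,
			 sector_t *dev_sectors, sector_t *array_sectors)
{
	sector_t ds = ti_len * copies;		/* raid10: copies > 1 */

	if (ds % data_stripes)
		return -1;			/* length not evenly divisible */

	ds /= data_stripes;			/* sectors used per component device */
	*dev_sectors = ds;
	*array_sectors = (sector_t)(data_stripes + delta_disks) * ds;
	return 0;
}

int main(void)
{
	sector_t dev, array;

	/* e.g. a 6144-sector target, 3 data stripes, 2 raid10 copies, no reshape */
	if (!split_sectors(6144, 3, 2, 0, &dev, &array))
		printf("dev_sectors=%llu array_sectors=%llu\n",
		       (unsigned long long)dev, (unsigned long long)array);
	return 0;
}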
dev_sectors       244 drivers/md/md-bitmap.c 			    < (rdev->data_offset + mddev->dev_sectors
dev_sectors       254 drivers/md/md-bitmap.c 			if (rdev->data_offset + mddev->dev_sectors
dev_sectors       289 drivers/md/md-faulty.c 		return mddev->dev_sectors;
dev_sectors       362 drivers/md/md-multipath.c 	return mddev->dev_sectors;
dev_sectors      1236 drivers/md/md.c 		mddev->dev_sectors = ((sector_t)sb->size) * 2;
dev_sectors      1377 drivers/md/md.c 	sb->size = mddev->dev_sectors / 2;
dev_sectors      1488 drivers/md/md.c 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
dev_sectors      1749 drivers/md/md.c 		mddev->dev_sectors = le64_to_cpu(sb->size);
dev_sectors      1926 drivers/md/md.c 	sb->size = cpu_to_le64(mddev->dev_sectors);
dev_sectors      2073 drivers/md/md.c 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
dev_sectors      2296 drivers/md/md.c 	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
dev_sectors      2305 drivers/md/md.c 			mddev->dev_sectors = rdev->sectors;
dev_sectors      2537 drivers/md/md.c 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
dev_sectors      3178 drivers/md/md.c 		    + mddev->dev_sectors > rdev->sectors)
dev_sectors      3273 drivers/md/md.c 	if (sectors < my_mddev->dev_sectors)
dev_sectors      4268 drivers/md/md.c 		    mddev->dev_sectors == 0)
dev_sectors      4528 drivers/md/md.c 		(unsigned long long)mddev->dev_sectors / 2);
dev_sectors      4553 drivers/md/md.c 		if (mddev->dev_sectors == 0 ||
dev_sectors      4554 drivers/md/md.c 		    mddev->dev_sectors > sectors)
dev_sectors      4555 drivers/md/md.c 			mddev->dev_sectors = sectors;
dev_sectors      4890 drivers/md/md.c 		max_sectors = mddev->dev_sectors;
dev_sectors      5652 drivers/md/md.c 			if (mddev->dev_sectors &&
dev_sectors      5653 drivers/md/md.c 			    rdev->data_offset + mddev->dev_sectors
dev_sectors      5736 drivers/md/md.c 	mddev->resync_max_sectors = mddev->dev_sectors;
dev_sectors      5958 drivers/md/md.c 	mddev->dev_sectors = 0;
dev_sectors      6392 drivers/md/md.c 	info.size          = mddev->dev_sectors / 2;
dev_sectors      6393 drivers/md/md.c 	if (info.size != mddev->dev_sectors / 2) /* overflow */
dev_sectors      6940 drivers/md/md.c 	mddev->dev_sectors   = 2 * (sector_t)info->size;
dev_sectors      7001 drivers/md/md.c 	sector_t old_dev_sectors = mddev->dev_sectors;
dev_sectors      7111 drivers/md/md.c 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
dev_sectors      7139 drivers/md/md.c 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
dev_sectors      7823 drivers/md/md.c 		max_sectors = mddev->dev_sectors;
dev_sectors      8548 drivers/md/md.c 		max_sectors = mddev->dev_sectors;
dev_sectors      9145 drivers/md/md.c 	sector_t old_dev_sectors = mddev->dev_sectors;
dev_sectors      9367 drivers/md/md.c 	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
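Two unit conventions show up in the md.c hits: the v0.90 superblock and the array-info ioctl path carry the per-device size in KiB (hence the *2 at line 1236 and the /2 at lines 1377 and 6392, with an overflow check at 6393), while the v1.x superblock stores it directly as a little-endian sector count (lines 1749 and 1926). A small user-space sketch of the KiB/sector conversion and the overflow hazard, under those assumptions:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t kib_to_sectors(uint32_t kib)		{ return (sector_t)kib * 2; }
static uint32_t sectors_to_kib(sector_t sectors)	{ return (uint32_t)(sectors / 2); }

int main(void)
{
	/* 8 TiB per device, expressed in 512-byte sectors */
	sector_t dev_sectors = 8ULL * 1024 * 1024 * 1024 * 2;
	uint32_t kib = sectors_to_kib(dev_sectors);

	/* models the check at md.c:6393: a 32-bit KiB field cannot represent
	 * such a large per-device size, so the truncated value differs */
	if ((sector_t)kib != dev_sectors / 2)
		printf("%llu sectors overflow a 32-bit KiB size field\n",
		       (unsigned long long)dev_sectors);

	printf("1 GiB = %llu sectors\n",
	       (unsigned long long)kib_to_sectors(1024 * 1024));
	return 0;
}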
dev_sectors       311 drivers/md/md.h 	sector_t			dev_sectors;	/* used size of component devices */
dev_sectors       665 drivers/md/raid0.c 		rdev->sectors = mddev->dev_sectors;
dev_sectors      2653 drivers/md/raid1.c 	max_sector = mddev->dev_sectors;
dev_sectors      2935 drivers/md/raid1.c 	return mddev->dev_sectors;
dev_sectors      3215 drivers/md/raid1.c 	if (sectors > mddev->dev_sectors &&
dev_sectors      3216 drivers/md/raid1.c 	    mddev->recovery_cp > mddev->dev_sectors) {
dev_sectors      3217 drivers/md/raid1.c 		mddev->recovery_cp = mddev->dev_sectors;
dev_sectors      3220 drivers/md/raid1.c 	mddev->dev_sectors = sectors;
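The raid1.c hits at 3215-3220 (mirrored by raid5.c at 7784-7789, listed further down) handle growing an array: if the resync checkpoint already lay beyond the old end, it is pulled back to the old dev_sectors so the newly exposed tail gets resynced. A hedged user-space model of just that clamp; resize_dev_sectors() is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define MaxSector ((sector_t)~0ULL)	/* "fully in sync" marker, as in md */

/* Hypothetical model of the grow path: when the device size increases and
 * recovery_cp was past the old end, restart resync at the old end so the
 * added region is brought into sync. */
static void resize_dev_sectors(sector_t *dev_sectors, sector_t *recovery_cp,
			       sector_t new_sectors)
{
	if (new_sectors > *dev_sectors && *recovery_cp > *dev_sectors)
		*recovery_cp = *dev_sectors;
	*dev_sectors = new_sectors;
}

int main(void)
{
	sector_t dev_sectors = 1 << 20, recovery_cp = MaxSector;

	resize_dev_sectors(&dev_sectors, &recovery_cp, 2 << 20);
	printf("dev_sectors=%llu recovery_cp=%llu\n",
	       (unsigned long long)dev_sectors, (unsigned long long)recovery_cp);
	return 0;
}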
dev_sectors      2932 drivers/md/raid10.c 		return mddev->dev_sectors - sector_nr;
dev_sectors      2936 drivers/md/raid10.c 	max_sector = mddev->dev_sectors;
dev_sectors      3534 drivers/md/raid10.c 		sectors = conf->dev_sectors;
dev_sectors      3564 drivers/md/raid10.c 	conf->dev_sectors = size << conf->geo.chunk_shift;
dev_sectors      3679 drivers/md/raid10.c 	calc_sectors(conf, mddev->dev_sectors);
dev_sectors      3693 drivers/md/raid10.c 			conf->prev.stride = conf->dev_sectors;
dev_sectors      3882 drivers/md/raid10.c 	mddev->dev_sectors = conf->dev_sectors;
dev_sectors      3999 drivers/md/raid10.c 	if (sectors > mddev->dev_sectors &&
dev_sectors      4005 drivers/md/raid10.c 	mddev->dev_sectors = conf->dev_sectors;
dev_sectors      4031 drivers/md/raid10.c 	mddev->dev_sectors = size;
dev_sectors        62 drivers/md/raid10.h 	sector_t		dev_sectors;  /* temp copy of mddev->dev_sectors */
dev_sectors      5842 drivers/md/raid5.c 		BUG_ON((mddev->dev_sectors &
dev_sectors      5962 drivers/md/raid5.c 	if (last_sector >= mddev->dev_sectors)
dev_sectors      5963 drivers/md/raid5.c 		last_sector = mddev->dev_sectors - 1;
dev_sectors      6027 drivers/md/raid5.c 	sector_t max_sector = mddev->dev_sectors;
dev_sectors      6068 drivers/md/raid5.c 		sector_t rv = mddev->dev_sectors - sector_nr;
dev_sectors      6737 drivers/md/raid5.c 		sectors = mddev->dev_sectors;
dev_sectors      7377 drivers/md/raid5.c 	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
dev_sectors      7378 drivers/md/raid5.c 	mddev->resync_max_sectors = mddev->dev_sectors;
dev_sectors      7784 drivers/md/raid5.c 	if (sectors > mddev->dev_sectors &&
dev_sectors      7785 drivers/md/raid5.c 	    mddev->recovery_cp > mddev->dev_sectors) {
dev_sectors      7786 drivers/md/raid5.c 		mddev->recovery_cp = mddev->dev_sectors;
dev_sectors      7789 drivers/md/raid5.c 	mddev->dev_sectors = sectors;
dev_sectors      8115 drivers/md/raid5.c 	mddev->dev_sectors = sectors;
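Finally, the usable per-device size is kept at a whole number of chunks: raid5.c:7377 masks dev_sectors down with ~(chunk_sectors - 1), while raid10.c:3564 rebuilds it as a chunk count shifted left by chunk_shift. Both idioms rely on the chunk size being a power of two. A short sketch of the two equivalent roundings, under that assumption:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	sector_t dev_sectors = 1000003;		/* arbitrary, not chunk aligned */
	unsigned int chunk_sectors = 1024;	/* 512 KiB chunks; power of two */
	unsigned int chunk_shift = 10;		/* log2(chunk_sectors) */

	/* raid5 style: mask off the partial trailing chunk */
	sector_t masked = dev_sectors & ~((sector_t)chunk_sectors - 1);

	/* raid10 style: count whole chunks, then shift back up */
	sector_t shifted = (dev_sectors >> chunk_shift) << chunk_shift;

	printf("masked=%llu shifted=%llu\n",
	       (unsigned long long)masked, (unsigned long long)shifted);
	return 0;
}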