/linux-4.1.27/drivers/md/ |
H A D | raid0.h | 5 sector_t zone_end; /* Start of the next zone (in sectors) */ 6 sector_t dev_start; /* Zone offset in real dev (in sectors) */
|
H A D | linear.c | 78 bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; linear_mergeable_bvec() 112 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) linear_size() argument 118 WARN_ONCE(sectors || raid_disks, linear_size() 143 sector_t sectors; rdev_for_each() local 153 sectors = rdev->sectors; rdev_for_each() 154 sector_div(sectors, mddev->chunk_sectors); rdev_for_each() 155 rdev->sectors = sectors * mddev->chunk_sectors; rdev_for_each() 161 conf->array_sectors += rdev->sectors; rdev_for_each() 181 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; 186 conf->disks[i].rdev->sectors; 273 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; linear_make_request() 309 "dev %s: %llu sectors, offset %llu\n", linear_make_request() 313 (unsigned long long)tmp_dev->rdev->sectors, linear_make_request()
|
H A D | raid0.c | 79 sector_t curr_zone_end, sectors; create_strip_zones() local 97 sectors = rdev1->sectors; rdev_for_each() 98 sector_div(sectors, mddev->chunk_sectors); rdev_for_each() 99 rdev1->sectors = sectors * mddev->chunk_sectors; rdev_for_each() 109 (unsigned long long)rdev1->sectors, rdev_for_each() 111 (unsigned long long)rdev2->sectors); rdev_for_each() 117 if (rdev2->sectors == rdev1->sectors) { rdev_for_each() 209 if (!smallest || (rdev1->sectors < smallest->sectors)) rdev_for_each() 219 zone->zone_end = smallest->sectors * cnt; 232 zone->dev_start = smallest->sectors; 238 if (rdev->sectors <= zone->dev_start) { 250 if (!smallest || rdev->sectors < smallest->sectors) { 254 (unsigned long long)rdev->sectors); 259 sectors = (smallest->sectors - zone->dev_start) * c; 260 pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", 262 zone->nb_dev, (unsigned long long)sectors); 264 curr_zone_end += sectors; 269 (unsigned long long)smallest->sectors); 392 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid0_size() argument 397 WARN_ONCE(sectors || raid_disks, raid0_size() 401 array_sectors += (rdev->sectors & raid0_size() 457 printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n", 525 unsigned sectors = chunk_sects - raid0_make_request() local 533 if (sectors < bio_sectors(bio)) { raid0_make_request() 534 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); raid0_make_request() 580 rdev->sectors = mddev->dev_sectors; rdev_for_each()
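Both the linear.c and raid0.c hits above show the same preparation step: each member device's size in 512-byte sectors is rounded down to a whole number of chunks (sector_div() by chunk_sectors, then multiply back). The following is a minimal standalone sketch of that rounding arithmetic only; the sizes are made up and this is not the kernel code.

    #include <stdio.h>
    #include <stdint.h>

    /* Round a device size (in 512-byte sectors) down to a whole number of
     * chunks, mirroring the "sector_div(sectors, chunk_sectors);
     * sectors * chunk_sectors" pattern in the linear.c/raid0.c hits above.
     * Illustrative only. */
    static uint64_t round_to_chunks(uint64_t dev_sectors, uint32_t chunk_sectors)
    {
            uint64_t chunks = dev_sectors / chunk_sectors;  /* sector_div() analogue */
            return chunks * chunk_sectors;                  /* usable sectors */
    }

    int main(void)
    {
            uint64_t dev_sectors = 1953525168ULL;   /* hypothetical ~931 GiB device */
            uint32_t chunk_sectors = 1024;          /* hypothetical 512 KiB chunks */

            printf("usable sectors: %llu (dropped %llu)\n",
                   (unsigned long long)round_to_chunks(dev_sectors, chunk_sectors),
                   (unsigned long long)(dev_sectors % chunk_sectors));
            return 0;
    }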
|
H A D | raid1.c | 275 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", raid_end_bio_io() 293 r1_bio->sector + (r1_bio->sectors); update_head_pos() 378 r1_bio->sectors, close_write() 452 r1_bio->sector, r1_bio->sectors, raid1_end_write_request() 475 pr_debug("raid1: behind end write sectors" raid1_end_write_request() 514 int sectors; read_balance() local 532 sectors = r1_bio->sectors; read_balance() 542 if ((conf->mddev->recovery_cp < this_sector + sectors) || read_balance() 545 this_sector + sectors))) read_balance() 564 rdev->recovery_offset < this_sector + sectors) read_balance() 570 if (is_badblock(rdev, this_sector, sectors, read_balance() 577 best_good_sectors = sectors; read_balance() 586 if (is_badblock(rdev, this_sector, sectors, read_balance() 597 if (choose_first && sectors > bad_sectors) read_balance() 598 sectors = bad_sectors; read_balance() 599 if (best_good_sectors > sectors) read_balance() 600 best_good_sectors = sectors; read_balance() 613 best_good_sectors = sectors; read_balance() 698 sectors = best_good_sectors; read_balance() 703 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; read_balance() 706 *max_sectors = sectors; read_balance() 1147 r1_bio->sectors = bio_sectors(bio); make_request() 1203 if (max_sectors < r1_bio->sectors) { make_request() 1210 r1_bio->sectors = max_sectors; make_request() 1227 r1_bio->sectors = bio_sectors(bio) - sectors_handled; make_request() 1262 max_sectors = r1_bio->sectors; make_request() 1349 if (max_sectors < r1_bio->sectors) { make_request() 1353 r1_bio->sectors = max_sectors; make_request() 1387 r1_bio->sectors, make_request() 1444 r1_bio->sectors = bio_sectors(bio) - sectors_handled; make_request() 1772 long sectors_to_go = r1_bio->sectors; end_sync_write() 1789 r1_bio->sectors, end_sync_write() 1793 r1_bio->sectors, end_sync_write() 1799 int s = r1_bio->sectors; end_sync_write() 1811 int sectors, struct page *page, int rw) r1_sync_page_io() 1813 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) r1_sync_page_io() 1824 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) r1_sync_page_io() 1846 int sectors = r1_bio->sectors; fix_sync_read_error() local 1849 while(sectors) { fix_sync_read_error() 1850 int s = sectors; fix_sync_read_error() 1901 md_done_sync(mddev, r1_bio->sectors, 0); fix_sync_read_error() 1906 sectors -= s; fix_sync_read_error() 1941 sectors -= s; fix_sync_read_error() 1966 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); process_checks() 1980 b->bi_iter.bi_size = r1_bio->sectors << 9; process_checks() 2031 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); process_checks() 2083 int s = r1_bio->sectors; sync_request_write() 2103 sector_t sect, int sectors) fix_read_error() 2106 while(sectors) { fix_read_error() 2107 int s = sectors; fix_read_error() 2175 "(%d sectors at %llu on %s)\n", fix_read_error() 2183 sectors -= s; fix_read_error() 2207 int sectors; narrow_write_error() local 2208 int sect_to_write = r1_bio->sectors; narrow_write_error() 2217 sectors = ((sector + block_sectors) narrow_write_error() 2223 if (sectors > sect_to_write) narrow_write_error() 2224 sectors = sect_to_write; narrow_write_error() 2225 /* Write at 'sector' for 'sectors'*/ narrow_write_error() 2246 wbio->bi_iter.bi_size = r1_bio->sectors << 9; narrow_write_error() 2248 bio_trim(wbio, sector - r1_bio->sector, sectors); narrow_write_error() 2254 sectors, 0) narrow_write_error() 2258 sect_to_write -= sectors; narrow_write_error() 2259 sector += sectors; narrow_write_error() 2260 sectors = 
block_sectors; narrow_write_error() 2268 int s = r1_bio->sectors; handle_sync_write_finished() 2296 r1_bio->sectors, 0); handle_write_finished() 2338 r1_bio->sector, r1_bio->sectors); handle_read_error() 2378 if (max_sectors < r1_bio->sectors) { handle_read_error() 2383 r1_bio->sectors = max_sectors; handle_read_error() 2396 r1_bio->sectors = bio_sectors(mbio) - sectors_handled; handle_read_error() 2498 int min_bad = 0; /* number of sectors that are bad in all devices */ sync_request() 2627 /* These sectors are bad on all InSync devices, so we sync_request() 2728 r1_bio->sectors = nr_sectors; sync_request() 2753 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid1_size() argument 2755 if (sectors) raid1_size() 2756 return sectors; raid1_size() 2985 static int raid1_resize(struct mddev *mddev, sector_t sectors) raid1_resize() argument 2994 sector_t newsize = raid1_size(mddev, sectors, 0); raid1_resize() 3006 if (sectors > mddev->dev_sectors && raid1_resize() 3011 mddev->dev_sectors = sectors; raid1_resize() 3012 mddev->resync_max_sectors = sectors; raid1_resize() 1810 r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) r1_sync_page_io() argument 2102 fix_read_error(struct r1conf *conf, int read_disk, sector_t sect, int sectors) fix_read_error() argument
|
H A D | raid10.c | 330 r10_bio->devs[slot].addr + (r10_bio->sectors); update_head_pos() 420 r10_bio->sectors, close_write() 505 r10_bio->sectors, raid10_end_write_request() 787 int sectors = r10_bio->sectors; read_balance() local 798 sectors = r10_bio->sectors; read_balance() 811 && (this_sector + sectors >= conf->next_resync)) read_balance() 825 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) read_balance() 832 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) read_balance() 836 if (is_badblock(rdev, dev_sector, sectors, read_balance() 847 if (!do_balance && sectors > bad_sectors) read_balance() 848 sectors = bad_sectors; read_balance() 849 if (best_good_sectors > sectors) read_balance() 850 best_good_sectors = sectors; read_balance() 865 best_good_sectors = sectors; read_balance() 1162 int sectors; __make_request() local 1171 sectors = bio_sectors(bio); __make_request() 1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { __make_request() 1182 sectors); __make_request() 1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) __make_request() 1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && __make_request() 1206 r10_bio->sectors = sectors; __make_request() 1251 if (max_sectors < r10_bio->sectors) { __make_request() 1257 r10_bio->sectors = max_sectors; __make_request() 1274 r10_bio->sectors = bio_sectors(bio) - sectors_handled; __make_request() 1310 max_sectors = r10_bio->sectors; __make_request() 1424 if (max_sectors < r10_bio->sectors) { __make_request() 1428 r10_bio->sectors = max_sectors; __make_request() 1440 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); __make_request() 1526 r10_bio->sectors = bio_sectors(bio) - sectors_handled; __make_request() 1937 atomic_add(r10_bio->sectors, end_sync_read() 1960 sector_t s = r10_bio->sectors; end_sync_request() 2011 r10_bio->sectors, end_sync_write() 2056 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); sync_request_write() 2072 int sectors = r10_bio->sectors; sync_request_write() local 2075 if (sectors < (len / 512)) sync_request_write() 2076 len = sectors * 512; sync_request_write() 2081 sectors -= len/512; sync_request_write() 2085 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); sync_request_write() 2098 tbio->bi_iter.bi_size = r10_bio->sectors << 9; sync_request_write() 2147 md_done_sync(mddev, r10_bio->sectors, 1); sync_request_write() 2154 * Recovery happens across physical sectors. 
2175 int sectors = r10_bio->sectors; fix_recovery_read_error() local 2180 while (sectors) { fix_recovery_read_error() 2181 int s = sectors; fix_recovery_read_error() 2240 sectors -= s; fix_recovery_read_error() 2322 int sectors, struct page *page, int rw) r10_sync_page_io() 2327 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) r10_sync_page_io() 2330 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) r10_sync_page_io() 2340 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) r10_sync_page_io() 2356 int sectors = r10_bio->sectors; fix_read_error() local 2390 while(sectors) { fix_read_error() 2391 int s = sectors; fix_read_error() 2476 " (%d sectors at %llu on %s)\n", fix_read_error() 2515 "corrected sectors" fix_read_error() 2516 " (%d sectors at %llu on %s)\n", fix_read_error() 2530 " (%d sectors at %llu on %s)\n", fix_read_error() 2544 sectors -= s; fix_read_error() 2568 int sectors; narrow_write_error() local 2569 int sect_to_write = r10_bio->sectors; narrow_write_error() 2578 sectors = ((r10_bio->sector + block_sectors) narrow_write_error() 2584 if (sectors > sect_to_write) narrow_write_error() 2585 sectors = sect_to_write; narrow_write_error() 2586 /* Write at 'sector' for 'sectors' */ narrow_write_error() 2588 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); narrow_write_error() 2596 sectors, 0) narrow_write_error() 2600 sect_to_write -= sectors; narrow_write_error() 2601 sector += sectors; narrow_write_error() 2602 sectors = block_sectors; narrow_write_error() 2670 if (max_sectors < r10_bio->sectors) { handle_read_error() 2676 r10_bio->sectors = max_sectors; handle_read_error() 2688 r10_bio->sectors = bio_sectors(mbio) - sectors_handled; handle_read_error() 2724 r10_bio->sectors, 0); handle_write_completed() 2729 r10_bio->sectors, 0)) handle_write_completed() 2740 r10_bio->sectors, 0); handle_write_completed() 2745 r10_bio->sectors, 0)) handle_write_completed() 2759 r10_bio->sectors, 0); handle_write_completed() 2776 r10_bio->sectors, 0); handle_write_completed() 3264 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; sync_request() 3378 r10_bio->sectors = nr_sectors; sync_request() 3386 r10_bio->sectors = nr_sectors; sync_request() 3417 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid10_size() argument 3425 if (!sectors) raid10_size() 3426 sectors = conf->dev_sectors; raid10_size() 3428 size = sectors >> conf->geo.chunk_shift; raid10_size() 3438 /* Calculate the number of sectors-per-device that will calc_sectors() 3823 static int raid10_resize(struct mddev *mddev, sector_t sectors) raid10_resize() argument 3827 * number of sectors used to be an appropriate multiple raid10_resize() 3847 size = raid10_size(mddev, sectors, 0); raid10_resize() 3859 if (sectors > mddev->dev_sectors && raid10_resize() 3864 calc_sectors(conf, sectors); raid10_resize() 3898 rdev->sectors = size; rdev_for_each() 4274 /* If restarting in the middle, skip the initial sectors */ reshape_request() 4372 r10_bio->sectors = last - sector_nr + 1; reshape_request() 4460 r10_bio->sectors = nr_sectors; reshape_request() 4463 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); reshape_request() 4499 md_done_sync(mddev, r10_bio->sectors, 0); reshape_request_write() 4521 md_sync_acct(b->bi_bdev, r10_bio->sectors); reshape_request_write() 4559 int sectors = r10_bio->sectors; handle_reshape_read_error() local 4573 while (sectors) { handle_reshape_read_error() 4574 int s = sectors; handle_reshape_read_error() 4611 sectors -= s; handle_reshape_read_error() 
4649 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); end_reshape_request() 2321 r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) r10_sync_page_io() argument
|
H A D | dm-stats.c | 25 unsigned long long sectors[2]; member in struct:dm_stat_percpu 482 p->sectors[idx] += len; dm_stat_for_entry() 583 shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); for_each_possible_cpu() 584 shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); for_each_possible_cpu() 611 p->sectors[READ] -= shared->tmp.sectors[READ]; __dm_stat_clear() 612 p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; __dm_stat_clear() 714 shared->tmp.sectors[READ], dm_stats_print() 718 shared->tmp.sectors[WRITE], dm_stats_print()
|
H A D | md.c | 257 unsigned int sectors; md_make_request() local 288 * save the sectors now since our bio can md_make_request() 291 sectors = bio_sectors(bio); md_make_request() 298 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); md_make_request() 697 /* return the offset of the super block in 512byte sectors */ calc_dev_sboffset() 722 rdev->sectors = 0; md_rdev_clear() 968 * Calculate the position of the superblock (512byte sectors), super_90_load() 1041 rdev->sectors = rdev->sb_start; super_90_load() 1046 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) super_90_load() 1047 rdev->sectors = (2ULL << 32) - 2; super_90_load() 1049 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) super_90_load() 1338 * 4TB == 2^32 KB, or 2*2^32 sectors. super_90_rdev_size_change() 1381 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 1388 sector_t sectors; super_1_load() local 1393 * Calculate the position of the superblock in 512byte sectors. super_1_load() 1487 int sectors = le16_to_cpu(sb->bblog_size); super_1_load() local 1488 if (sectors > (PAGE_SIZE / 512)) super_1_load() 1494 if (!sync_page_io(rdev, bb_sector, sectors << 9, super_1_load() 1499 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { super_1_load() 1539 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); super_1_load() 1540 sectors -= rdev->data_offset; super_1_load() 1542 sectors = rdev->sb_start; super_1_load() 1543 if (sectors < le64_to_cpu(sb->data_size)) super_1_load() 1545 rdev->sectors = le64_to_cpu(sb->data_size); super_1_load() 1718 sb->data_size = cpu_to_le64(rdev->sectors); super_1_sync() 1848 max_sectors = rdev->sectors + sb_start - rdev->sb_start; super_1_rdev_size_change() 2036 /* make sure rdev->sectors exceeds mddev->dev_sectors */ bind_rdev_to_array() 2037 if (rdev->sectors && (mddev->dev_sectors == 0 || bind_rdev_to_array() 2038 rdev->sectors < mddev->dev_sectors)) { bind_rdev_to_array() 2047 mddev->dev_sectors = rdev->sectors; bind_rdev_to_array() 2750 if (rdev->sectors && rdev->mddev->external) offset_store() 2786 + mddev->dev_sectors > rdev->sectors) new_offset_store() 2823 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); rdev_size_show() 2836 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) strict_blocks_to_sectors() argument 2851 *sectors = new; strict_blocks_to_sectors() 2859 sector_t oldsectors = rdev->sectors; rdev_size_store() 2860 sector_t sectors; rdev_size_store() local 2862 if (strict_blocks_to_sectors(buf, §ors) < 0) rdev_size_store() 2868 sectors = super_types[my_mddev->major_version]. 
rdev_size_store() 2869 rdev_size_change(rdev, sectors); rdev_size_store() 2870 if (!sectors) rdev_size_store() 2872 } else if (!sectors) rdev_size_store() 2873 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - rdev_size_store() 2879 if (sectors < my_mddev->dev_sectors) rdev_size_store() 2882 rdev->sectors = sectors; rdev_size_store() 2883 if (sectors > oldsectors && my_mddev->external) { rdev_size_store() 2901 overlaps(rdev->data_offset, rdev->sectors, rdev_for_each() 2903 rdev2->sectors)) { rdev_for_each() 2920 rdev->sectors = oldsectors; 4075 sector_t sectors; size_store() local 4076 int err = strict_blocks_to_sectors(buf, §ors); size_store() 4086 err = update_size(mddev, sectors); size_store() 4092 mddev->dev_sectors > sectors) size_store() 4093 mddev->dev_sectors = sectors; size_store() 4697 sector_t sectors; array_size_store() local 4706 sectors = mddev->pers->size(mddev, 0, 0); array_size_store() 4708 sectors = mddev->array_sectors; array_size_store() 4712 if (strict_blocks_to_sectors(buf, §ors) < 0) array_size_store() 4714 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) array_size_store() 4721 mddev->array_sectors = sectors; array_size_store() 5979 rdev->sectors = rdev->sb_start; add_new_disk() 6063 rdev->sectors = rdev->sb_start; hot_add_disk() 6294 /* The "num_sectors" is the number of sectors of each device that update_size() 6310 sector_t avail = rdev->sectors; rdev_for_each() 6514 * 4 sectors (with a BIG number of cylinders...). This drives 6522 geo->sectors = 4; md_getgeo() 7146 * near the end of resync when the number of remaining sectors status_resync() 7156 rt = max_sectors - resync; /* number of remaining sectors */ status_resync() 7232 sector_t sectors; md_seq_show() local 7264 sectors = 0; md_seq_show() 7280 sectors += rdev->sectors; rdev_for_each_rcu() 7291 (unsigned long long)sectors / 2); 7458 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + rdev_for_each_rcu() 7459 (int)part_stat_read(&disk->part0, sectors[1]) - rdev_for_each_rcu() 7795 sector_t sectors; local 7835 sectors = mddev->pers->sync_request(mddev, j, &skipped); 7836 if (sectors == 0) { 7842 io_sectors += sectors; 7843 atomic_add(sectors, &mddev->recovery_active); 7849 j += sectors; 8306 rdev->sectors += rdev->data_offset - rdev->new_data_offset; rdev_for_each() 8308 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; rdev_for_each() 8318 * Length of bad-range, in sectors: 0-511 for lengths 1-512 8340 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, md_is_badblock() argument 8347 sector_t target = s + sectors; md_is_badblock() 8355 sectors = target - s; md_is_badblock() 8420 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, md_set_badblocks() argument 8434 sector_t next = s + sectors; md_set_badblocks() 8438 sectors = next - s; md_set_badblocks() 8467 if (s == a && s + sectors >= e) md_set_badblocks() 8473 if (e < s + sectors) md_set_badblocks() 8474 e = s + sectors; md_set_badblocks() 8486 sectors = e - s; md_set_badblocks() 8489 if (sectors && hi < bb->count) { md_set_badblocks() 8495 if (a <= s + sectors) { md_set_badblocks() 8497 if (e <= s + sectors) { md_set_badblocks() 8499 e = s + sectors; md_set_badblocks() 8512 sectors = e - s; md_set_badblocks() 8517 if (sectors == 0 && hi < bb->count) { md_set_badblocks() 8533 while (sectors) { md_set_badblocks() 8541 int this_sectors = sectors; md_set_badblocks() 8549 sectors -= this_sectors; md_set_badblocks() 8562 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int 
sectors, rdev_set_badblocks() argument 8571 s, sectors, 0); rdev_set_badblocks() 8588 static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors) md_clear_badblocks() argument 8592 sector_t target = s + sectors; md_clear_badblocks() 8605 sectors = target - s; md_clear_badblocks() 8677 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, rdev_clear_badblocks() argument 8685 s, sectors); rdev_clear_badblocks()
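The md.c hits around lines 8318-8562 manage the bad-block list; the quoted comment says a bad range's length is stored as "0-511 for lengths 1-512", i.e. a 9-bit length-minus-one field packed next to the start sector. Below is a hedged sketch of such a pack/unpack scheme. Only the length-minus-one idea comes from the excerpt; the field positions and the "acknowledged" bit are assumptions and do not claim to be md's actual layout.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative packing of one bad-block entry into a u64:
     *   bits 63..10  start sector   (assumed width)
     *   bit   9      acknowledged   (assumed)
     *   bits  8..0   length - 1     (0..511 encodes 1..512 sectors,
     *                                matching the comment quoted above)
     * A sketch, not the md.c layout. */
    #define BB_LEN_BITS  9
    #define BB_ACK_BIT   (1ULL << BB_LEN_BITS)
    #define BB_LEN_MASK  ((1ULL << BB_LEN_BITS) - 1)

    static uint64_t bb_make(uint64_t sector, unsigned len, int ack)
    {
            return (sector << (BB_LEN_BITS + 1)) |
                   (ack ? BB_ACK_BIT : 0) |
                   ((uint64_t)(len - 1) & BB_LEN_MASK);
    }

    int main(void)
    {
            uint64_t e = bb_make(123456789ULL, 512, 1);   /* hypothetical entry */

            printf("sector %llu, len %u, ack %d\n",
                   (unsigned long long)(e >> (BB_LEN_BITS + 1)),
                   (unsigned)(e & BB_LEN_MASK) + 1,
                   (int)!!(e & BB_ACK_BIT));
            return 0;
    }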
|
H A D | md.h | 43 sector_t sectors; /* Device size (in 512bytes sectors) */ member in struct:md_rdev 60 sector_t sb_start; /* offset of the super block (in 512byte sectors) */ 119 int shift; /* shift from sectors to block size 127 sector_t size; /* in sectors */ 189 extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, 191 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, is_badblock() argument 196 sectors, is_badblock() 204 extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 206 extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 307 atomic64_t resync_mismatches; /* count of sectors where 510 int (*resize) (struct mddev *mddev, sector_t sectors); 511 sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
|
H A D | raid10.h | 41 int chunk_shift; /* shift from chunks to sectors */ 97 int sectors; member in struct:r10bio
|
H A D | bitmap.h | 131 __le32 sectors_reserved; /* 64 number of 512-byte sectors that are 147 * (3)This is the number of sectors represented by the bitmap, and is the range that 252 unsigned long sectors, int behind); 254 unsigned long sectors, int success, int behind);
|
H A D | faulty.c | 291 static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) faulty_size() argument 296 if (sectors == 0) faulty_size() 299 return sectors; faulty_size()
|
H A D | dm-raid.c | 313 * @region_size: region size in sectors. If 0, pick a size (4MiB default). 326 * Choose a reasonable default. All figures in sectors. validate_region_size() 331 DMINFO("Choosing default region size of %lu sectors", validate_region_size() 335 region_size = 1 << 13; /* sectors */ validate_region_size() 347 DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", validate_region_size() 365 * Convert sectors to bytes. validate_region_size() 475 * <chunk_size> The number of sectors per disk that 487 * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) 488 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs 489 * [region_size <sectors>] Defines granularity of bitmap 623 * In device-mapper, we specify things in sectors, but parse_raid_params() 643 * In device-mapper, we specify things in sectors, but parse_raid_params() 938 DMERR("Reshaping arrays not yet supported. (stripe sectors change)"); super_init_validation() 1498 /* convert from kiB to sectors */ raid_status()
|
H A D | bitmap.c | 1018 * We ignore all bits for sectors that end earlier than 'start'. 1357 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) bitmap_startwrite() argument 1373 while (sectors) { bitmap_startwrite() 1412 if (sectors > blocks) bitmap_startwrite() 1413 sectors -= blocks; bitmap_startwrite() 1415 sectors = 0; bitmap_startwrite() 1421 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, bitmap_endwrite() argument 1434 while (sectors) { bitmap_endwrite() 1466 if (sectors > blocks) bitmap_endwrite() 1467 sectors -= blocks; bitmap_endwrite() 1469 sectors = 0; bitmap_endwrite() 1606 /* For each chunk covered by any of these sectors, set the bitmap_set_memory_bits() 1977 * to current size - in sectors. bitmap_resize() 2221 unsigned long sectors; space_store() local 2224 rv = kstrtoul(buf, 10, §ors); space_store() 2228 if (sectors == 0) space_store() 2232 sectors < (mddev->bitmap->storage.bytes + 511) >> 9) space_store() 2238 mddev->bitmap_info.space = sectors; space_store()
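The bitmap_startwrite()/bitmap_endwrite() hits above both consume a request one bitmap chunk at a time: "if (sectors > blocks) sectors -= blocks; else sectors = 0;". Here is a small standalone model of that walk; the helper name and the fixed power-of-two chunk size are invented for illustration.

    #include <stdio.h>

    /* Walk [offset, offset + sectors) one chunk at a time, the way the
     * bitmap_startwrite()/bitmap_endwrite() hits above consume 'sectors'.
     * chunk_sectors must be a power of two in this sketch. */
    static void walk_chunks(unsigned long offset, unsigned long sectors,
                            unsigned long chunk_sectors)
    {
            while (sectors) {
                    /* sectors left in the chunk that contains 'offset' */
                    unsigned long blocks =
                            chunk_sectors - (offset & (chunk_sectors - 1));

                    printf("chunk at %lu: touch %lu sector(s)\n",
                           offset & ~(chunk_sectors - 1),
                           blocks < sectors ? blocks : sectors);

                    offset += blocks;
                    if (sectors > blocks)
                            sectors -= blocks;
                    else
                            sectors = 0;
            }
    }

    int main(void)
    {
            walk_chunks(1000, 300, 128);    /* hypothetical 128-sector chunks */
            return 0;
    }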
|
H A D | dm-exception-store.h | 189 * Return the number of sectors in the device.
|
H A D | raid1.h | 127 int sectors; member in struct:r1bio
|
H A D | multipath.c | 377 static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) multipath_size() argument 379 WARN_ONCE(sectors || raid_disks, multipath_size()
|
H A D | dm-log-writes.c | 86 * nr_sectors - the number of sectors we wrote. 569 /* No sectors and not a flush? Don't care */ log_writes_map()
|
H A D | raid5.c | 140 int sectors = bio_sectors(bio); r5_next_bio() local 141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) r5_next_bio() 2331 " (%lu sectors at %llu on %s)\n", raid5_end_read_request() 5326 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5353 /* If restarting in the middle, skip the initial sectors */ reshape_request() 6250 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid5_size() argument 6254 if (!sectors) raid5_size() 6255 sectors = mddev->dev_sectors; raid5_size() 6260 sectors &= ~((sector_t)mddev->chunk_sectors - 1); raid5_size() 6261 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); raid5_size() 6262 return sectors * (raid_disks - conf->max_degraded); raid5_size() 7192 static int raid5_resize(struct mddev *mddev, sector_t sectors) raid5_resize() argument 7202 sectors &= ~((sector_t)mddev->chunk_sectors - 1); raid5_resize() 7203 newsize = raid5_size(mddev, sectors, mddev->raid_disks); raid5_resize() 7208 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); raid5_resize() 7215 if (sectors > mddev->dev_sectors && raid5_resize() 7220 mddev->dev_sectors = sectors; raid5_resize() 7221 mddev->resync_max_sectors = sectors; raid5_resize() 7533 sector_t sectors; raid45_takeover_raid0() local 7542 sectors = raid0_conf->strip_zone[0].zone_end; raid45_takeover_raid0() 7543 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); raid45_takeover_raid0() 7544 mddev->dev_sectors = sectors; raid45_takeover_raid0()
|
H A D | dm-table.c | 163 * Append an empty entry to catch sectors beyond the end of alloc_targets() 604 * (in units of 512-byte sectors). validate_hardware_logical_block_alignment() 616 * target, how many sectors must the next target handle? validate_hardware_logical_block_alignment() 638 * If the remaining sectors fall entirely within this validate_hardware_logical_block_alignment()
|
/linux-4.1.27/drivers/scsi/ |
H A D | scsicam.c | 55 * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. 57 * @capacity: size of the disk in sectors 58 * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders 84 and at most 62 sectors per track - this works up to scsicam_bios_param() 112 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table 114 * @capacity: size of the disk in sectors 117 * @secs: put sectors here 225 * minimizes the number of sectors that will be unused at the end 234 unsigned long heads, sectors, cylinders, temp; setsize() local 237 sectors = 62L; /* Maximize sectors per track */ setsize() 239 temp = cylinders * sectors; /* Compute divisor for heads */ setsize() 243 temp = cylinders * heads; /* Compute divisor for sectors */ setsize() 244 sectors = capacity / temp; /* Compute value for sectors per setsize() 247 sectors++; /* Else, increment number of sectors */ setsize() 248 temp = heads * sectors; /* Compute divisor for cylinders */ setsize() 256 *secs = (unsigned int) sectors; setsize()
|
H A D | ps3rom.c | 173 u32 sectors) ps3rom_read_request() 177 dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n", ps3rom_read_request() 178 __func__, __LINE__, sectors, start_sector); ps3rom_read_request() 182 sectors, 0, dev->bounce_lpar, &dev->tag); ps3rom_read_request() 194 u32 sectors) ps3rom_write_request() 198 dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n", ps3rom_write_request() 199 __func__, __LINE__, sectors, start_sector); ps3rom_write_request() 205 sectors, 0, dev->bounce_lpar, &dev->tag); ps3rom_write_request() 171 ps3rom_read_request(struct ps3_storage_device *dev, struct scsi_cmnd *cmd, u32 start_sector, u32 sectors) ps3rom_read_request() argument 192 ps3rom_write_request(struct ps3_storage_device *dev, struct scsi_cmnd *cmd, u32 start_sector, u32 sectors) ps3rom_write_request() argument
|
H A D | sr.h | 41 unsigned xa_flag:1; /* CD has XA sectors ? */
|
H A D | ultrastor.c | 236 unsigned char sectors; member in struct:ultrastor_config 280 unsigned char sectors; member in struct:__anon9693 463 config.sectors = mapping_table[config_2.mapping_mode].sectors; ultrastor_14f_detect() 597 config.sectors = mapping_table[(config_2 >> 3) & 3].sectors; 1038 unsigned int s = config.heads * config.sectors; 1041 dkinfo[1] = config.sectors;
|
H A D | sr.c | 364 * sectors past the last readable block. sr_done() 366 * last 75 2K sectors, we decrease the saved size sr_done() 408 "Finishing %u sectors\n", blk_rq_sectors(rq))); sr_init_command() 423 * we do lazy blocksize switching (when reading XA sectors, sr_init_command() 796 * HP 4020i CD-Recorder reports 2340 byte sectors get_sectorsize() 797 * Philips CD-Writers report 2352 byte sectors get_sectorsize() 799 * Use 2k sectors for them.. get_sectorsize()
|
H A D | dtc.c | 290 * Inputs : size = size of device in sectors (512 bytes), dev = block device 291 * major / minor, ip[] = {heads, sectors, cylinders}
|
H A D | t128.c | 275 * Inputs : size = size of device in sectors (512 bytes), dev = block device 276 * major / minor, ip[] = {heads, sectors, cylinders}
|
H A D | fdomain.c | 1598 unsigned char sectors; fdomain_16x0_biosparam() member in struct:drive_info 1609 The last 4 bytes appear to be the drive's size in sectors. fdomain_16x0_biosparam() 1669 info_array[1] = i.sectors; fdomain_16x0_biosparam() 1677 info_array[1] = i.sectors; fdomain_16x0_biosparam() 1696 c = number of sectors (double word) fdomain_16x0_biosparam() 1716 info_array[1] = p[6] & 0x3f; /* sectors */ fdomain_16x0_biosparam() 1725 info_array[1] = 0x3f; /* sectors = 63 */ fdomain_16x0_biosparam() 1728 info_array[1] = 0x3f; /* sectors = 63 */ fdomain_16x0_biosparam() 1731 info_array[1] = 0x20; /* sectors = 32 */ fdomain_16x0_biosparam()
|
H A D | u14-34f.c | 384 * random seeks over S sectors is S/3. 385 * When (Q-1) requests are uniformly distributed over S sectors, the average 387 * average seek distance for (Q-1) random requests over S sectors is S/Q. 605 unsigned char sectors; member in struct:hostdata 819 unsigned char sectors; port_detect() member in struct:__anon9652 927 HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors; port_detect() 1536 dkinfo[1] = HD(j)->sectors; u14_34f_bios_param() 1537 dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors); u14_34f_bios_param()
|
H A D | pas16.c | 450 * Inputs : size = size of device in sectors (512 bytes), dev = block device 451 * major / minor, ip[] = {heads, sectors, cylinders}
|
H A D | eata_generic.h | 373 __u32 sectors; /* number of sectors */ member in struct:drive_geom_emul
|
H A D | sr_ioctl.c | 469 * a function to read all sorts of funny cdrom sectors using the READ_CD 521 * read sectors with blocksizes other than 2048
|
H A D | 3w-xxxx.c | 94 1.02.00.008 - Set max sectors per io to TW_MAX_SECTORS in tw_findcards(). 188 1.26.02.001 - Increase max ioctl buffer size to 512 sectors. 1329 /* This funciton returns unit geometry in cylinders/heads/sectors */ tw_scsi_biosparam() 1333 int heads, sectors, cylinders; tw_scsi_biosparam() local 1340 sectors = 32; tw_scsi_biosparam() 1341 cylinders = sector_div(capacity, heads * sectors); tw_scsi_biosparam() 1345 sectors = 63; tw_scsi_biosparam() 1346 cylinders = sector_div(capacity, heads * sectors); tw_scsi_biosparam() 1349 dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders); tw_scsi_biosparam() 1351 geom[1] = sectors; tw_scsi_biosparam()
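The tw_scsi_biosparam() hit spells out the BIOS-geometry heuristic that several drivers below (3w-9xxx, dpt_i2o, megaraid, mvumi, 3w-sas, stex) repeat: small disks get 64 heads / 32 sectors per track, larger ones 255 heads / 63 sectors, and the cylinder count comes from dividing the capacity by heads x sectors. A simplified standalone sketch follows; the 1 GiB switch point is an assumption, since each driver picks its own cut-off, and the division is a simplified reading of the sector_div() calls above.

    #include <stdio.h>
    #include <stdint.h>

    /* Fake a CHS geometry from a capacity in 512-byte sectors, following the
     * 64/32 vs 255/63 pattern visible in the biosparam hits above. */
    static void fake_geometry(uint64_t capacity, int *heads, int *sectors,
                              uint64_t *cylinders)
    {
            if (capacity >= 0x200000ULL) {  /* >= 1 GiB of sectors (assumed cut-off) */
                    *heads = 255;
                    *sectors = 63;
            } else {
                    *heads = 64;
                    *sectors = 32;
            }
            *cylinders = capacity / (uint64_t)(*heads * *sectors);
    }

    int main(void)
    {
            uint64_t capacity = 312581808ULL;       /* hypothetical ~149 GiB disk */
            int heads, sectors;
            uint64_t cylinders;

            fake_geometry(capacity, &heads, &sectors, &cylinders);
            printf("heads=%d sectors=%d cylinders=%llu\n",
                   heads, sectors, (unsigned long long)cylinders);
            return 0;
    }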
|
H A D | sd.h | 36 * Number of sectors at the end of the device to avoid multi-sector
|
H A D | gdth.h | 493 u32 ai_size; /* user capacity [sectors] */ 494 u32 ai_striping_size; /* striping size [sectors] */ 657 u32 size; /* size (sectors) */ 668 u8 secs_p_head; /* sectors/head */
|
H A D | hptiop.h | 220 __le16 sectors; member in struct:hpt_iop_request_block_command
|
H A D | BusLogic.c | 3373 Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and 3381 heads and 32 sectors, and drives above 2 GB inclusive are given a disk 3382 geometry of 255 heads and 63 sectors. However, if the BIOS detects that the 3396 if (adapter->ext_trans_enable && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */) { blogic_diskparam() 3397 if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */) { blogic_diskparam() 3399 diskparam->sectors = 63; blogic_diskparam() 3402 diskparam->sectors = 32; blogic_diskparam() 3406 diskparam->sectors = 32; blogic_diskparam() 3408 diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); blogic_diskparam() 3428 diskparam->sectors = 32; blogic_diskparam() 3432 diskparam->sectors = 32; blogic_diskparam() 3436 diskparam->sectors = 63; blogic_diskparam() 3445 diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); blogic_diskparam() 3446 if (part_no < 4 && part_end_sector == diskparam->sectors) { blogic_diskparam() 3448 blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors); blogic_diskparam() 3451 blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); blogic_diskparam()
|
H A D | 3w-9xxx.c | 1695 /* This funciton returns unit geometry in cylinders/heads/sectors */ twa_scsi_biosparam() 1698 int heads, sectors, cylinders; twa_scsi_biosparam() local 1705 sectors = 63; twa_scsi_biosparam() 1706 cylinders = sector_div(capacity, heads * sectors); twa_scsi_biosparam() 1709 sectors = 32; twa_scsi_biosparam() 1710 cylinders = sector_div(capacity, heads * sectors); twa_scsi_biosparam() 1714 geom[1] = sectors; twa_scsi_biosparam()
|
H A D | dpt_i2o.c | 487 int sectors=-1; adpt_bios_param() local 495 sectors = 2; adpt_bios_param() 500 sectors = 32; adpt_bios_param() 505 sectors = 63; adpt_bios_param() 510 sectors = 63; adpt_bios_param() 515 sectors = 63; adpt_bios_param() 517 cylinders = sector_div(capacity, heads * sectors); adpt_bios_param() 522 sectors = 63; adpt_bios_param() 527 geom[1] = sectors; adpt_bios_param()
|
H A D | megaraid.c | 73 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); 2838 int sectors; megaraid_biosparam() local 2846 /* Default heads (64) & sectors (32) */ megaraid_biosparam() 2848 sectors = 32; megaraid_biosparam() 2849 cylinders = (ulong)capacity / (heads * sectors); megaraid_biosparam() 2857 sectors = 63; megaraid_biosparam() 2858 cylinders = (ulong)capacity / (heads * sectors); megaraid_biosparam() 2863 geom[1] = sectors; megaraid_biosparam() 2881 /* Default heads (64) & sectors (32) */ megaraid_biosparam() 2883 sectors = 32; megaraid_biosparam() 2884 cylinders = (ulong)capacity / (heads * sectors); megaraid_biosparam() 2889 sectors = 63; megaraid_biosparam() 2890 cylinders = (ulong)capacity / (heads * sectors); megaraid_biosparam() 2895 geom[1] = sectors; megaraid_biosparam()
|
H A D | mvumi.c | 2198 int heads, sectors; mvumi_bios_param() local 2203 sectors = 32; mvumi_bios_param() 2204 tmp = heads * sectors; mvumi_bios_param() 2210 sectors = 63; mvumi_bios_param() 2211 tmp = heads * sectors; mvumi_bios_param() 2216 geom[1] = sectors; mvumi_bios_param()
|
H A D | 3w-sas.c | 1410 /* This funciton returns unit geometry in cylinders/heads/sectors */ twl_scsi_biosparam() 1413 int heads, sectors; twl_scsi_biosparam() local 1420 sectors = 63; twl_scsi_biosparam() 1423 sectors = 32; twl_scsi_biosparam() 1427 geom[1] = sectors; twl_scsi_biosparam() 1428 geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ twl_scsi_biosparam()
|
H A D | initio.h | 471 u8 sectors; member in struct:target_control 630 #define NTC_1GIGA 0x40 /* 255 head / 63 sectors (64/32) */
|
H A D | sym53c416.c | 806 ip[1] = 32; /* sectors */ sym53c416_bios_param() 810 ip[1] = 63; /* sectors */ sym53c416_bios_param()
|
H A D | stex.c | 1351 int heads = 255, sectors = 63; stex_biosparam() local 1355 sectors = 32; stex_biosparam() 1358 sector_div(capacity, heads * sectors); stex_biosparam() 1361 geom[1] = sectors; stex_biosparam()
|
H A D | scsi_debug.c | 566 static sector_t sdebug_capacity; /* in sectors */ 572 static int sdebug_sectors_per; /* sectors per cylinder */ 2476 unsigned int sectors, bool read) dif_copy_prot() 2484 resid = sectors * sizeof(*dif_storep); dif_copy_prot() 2519 unsigned int sectors, u32 ei_lba) prot_verify_read() 2525 for (i = 0; i < sectors; i++, ei_lba++) { prot_verify_read() 2541 dif_copy_prot(SCpnt, start_sec, sectors, true); prot_verify_read() 2710 unsigned int sectors, u32 ei_lba) prot_verify_write() 2769 dif_copy_prot(SCpnt, start_sec, sectors, false); prot_verify_write() 4208 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" scsi_debug_show_info() 2475 dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, unsigned int sectors, bool read) dif_copy_prot() argument 2518 prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) prot_verify_read() argument 2709 prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) prot_verify_write() argument
|
/linux-4.1.27/drivers/target/ |
H A D | target_core_sbc.c | 186 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) sbc_get_size() argument 188 return cmd->se_dev->dev_attrib.block_size * sectors; sbc_get_size() 268 unsigned int sectors = sbc_get_write_same_sectors(cmd); sbc_setup_write_same() local 277 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { sbc_setup_write_same() 278 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", sbc_setup_write_same() 279 sectors, cmd->se_dev->dev_attrib.max_write_same_len); sbc_setup_write_same() 285 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || sbc_setup_write_same() 286 ((cmd->t_task_lba + sectors) > end_lba)) { sbc_setup_write_same() 287 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", sbc_setup_write_same() 288 (unsigned long long)end_lba, cmd->t_task_lba, sectors); sbc_setup_write_same() 316 ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); sbc_setup_write_same() 665 u32 sectors, bool is_write) sbc_check_prot() 723 cmd->prot_length = dev->prot_length * sectors; sbc_check_prot() 732 cmd->data_length = sectors * dev->dev_attrib.block_size; sbc_check_prot() 770 u32 sectors = 0; sbc_parse_cdb() local 775 sectors = transport_get_sectors_6(cdb); sbc_parse_cdb() 782 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 788 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sbc_parse_cdb() 797 sectors = transport_get_sectors_12(cdb); sbc_parse_cdb() 803 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sbc_parse_cdb() 812 sectors = transport_get_sectors_16(cdb); sbc_parse_cdb() 818 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sbc_parse_cdb() 827 sectors = transport_get_sectors_6(cdb); sbc_parse_cdb() 835 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 841 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sbc_parse_cdb() 850 sectors = transport_get_sectors_12(cdb); sbc_parse_cdb() 856 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sbc_parse_cdb() 865 sectors = transport_get_sectors_16(cdb); sbc_parse_cdb() 871 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sbc_parse_cdb() 883 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 903 sectors = transport_get_sectors_32(cdb); sbc_parse_cdb() 923 sectors = transport_get_sectors_32(cdb); sbc_parse_cdb() 924 if (!sectors) { sbc_parse_cdb() 925 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" sbc_parse_cdb() 945 sectors = cdb[13]; sbc_parse_cdb() 949 if (sectors > 1) { sbc_parse_cdb() 951 " than 1\n", sectors); sbc_parse_cdb() 958 size = 2 * sbc_get_size(cmd, sectors); sbc_parse_cdb() 960 cmd->t_task_nolb = sectors; sbc_parse_cdb() 989 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 992 sectors = transport_get_sectors_16(cdb); sbc_parse_cdb() 1015 sectors = transport_get_sectors_16(cdb); sbc_parse_cdb() 1016 if (!sectors) { sbc_parse_cdb() 1017 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); sbc_parse_cdb() 1029 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 1030 if (!sectors) { sbc_parse_cdb() 1031 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); sbc_parse_cdb() 1048 sectors = transport_get_sectors_10(cdb); sbc_parse_cdb() 1078 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || sbc_parse_cdb() 1079 ((cmd->t_task_lba + sectors) > end_lba)) { sbc_parse_cdb() 1081 "(lba %llu, sectors %u)\n", sbc_parse_cdb() 1082 end_lba, cmd->t_task_lba, sectors); sbc_parse_cdb() 1087 size = sbc_get_size(cmd, sectors); sbc_parse_cdb() 1275 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool 
read, sbc_dif_copy_prot() argument 1287 left = sectors * dev->prot_length; sbc_dif_copy_prot() 1319 sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, sbc_dif_verify_write() argument 1370 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); sbc_dif_verify_write() 1377 __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, __sbc_dif_verify_read() argument 1439 u32 sectors = cmd->prot_length / dev->prot_length; sbc_dif_read_strip() local 1441 return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, sbc_dif_read_strip() 1446 sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, sbc_dif_verify_read() argument 1451 rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); sbc_dif_verify_read() 1455 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); sbc_dif_verify_read() 664 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, u32 sectors, bool is_write) sbc_check_prot() argument
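Two calculations recur in the sbc_setup_write_same()/sbc_parse_cdb() hits above: the transfer size is block_size x sectors (sbc_get_size), and an LBA range is rejected when t_task_lba + sectors wraps around or runs past end_lba. A standalone sketch of both checks with invented device constants; it is not a SCSI target implementation.

    #include <stdio.h>
    #include <stdint.h>

    /* Transfer size in bytes = logical block size * number of blocks,
     * mirroring sbc_get_size() in the hits above. */
    static uint64_t xfer_size(uint32_t block_size, uint32_t sectors)
    {
            return (uint64_t)block_size * sectors;
    }

    /* Reject a range that wraps around or ends past the last addressable LBA,
     * mirroring the "(lba + sectors) < lba || (lba + sectors) > end_lba"
     * test quoted above. */
    static int lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
    {
            uint64_t end = lba + sectors;

            if (end < lba)          /* 64-bit wrap-around */
                    return 0;
            if (end > end_lba)      /* runs past the device */
                    return 0;
            return 1;
    }

    int main(void)
    {
            uint64_t end_lba = 1048576;     /* hypothetical device, 512-byte blocks */

            printf("%llu bytes\n", (unsigned long long)xfer_size(512, 128));
            printf("in range: %d\n", lba_range_ok(1048500, 128, end_lba));
            printf("in range: %d\n", lba_range_ok(UINT64_MAX - 10, 128, end_lba));
            return 0;
    }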
|
H A D | target_core_rd.c | 416 u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; rd_do_prot_rw() local 435 prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length, rd_do_prot_rw() 469 rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset); rd_do_prot_rw()
|
H A D | target_core_iblock.c | 465 sector_t sectors = sbc_get_write_same_sectors(cmd); iblock_execute_write_same() local 496 while (sectors) { iblock_execute_write_same() 510 sectors -= 1; iblock_execute_write_same()
|
H A D | target_core_file.c | 644 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; fd_execute_rw() local 646 rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, fd_execute_rw() 660 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; fd_execute_rw() local 666 rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, fd_execute_rw()
|
/linux-4.1.27/include/linux/ |
H A D | amifd.h | 9 #define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */ 15 int sects; /* sectors per track */ 36 unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
|
H A D | drbd_limits.h | 153 #define DRBD_DISK_SIZE_SCALE 's' /* sectors */ 183 #define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */ 196 #define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ 198 #define DRBD_CONG_FILL_SCALE 's' /* sectors */
|
H A D | bio.h | 358 extern struct bio *bio_split(struct bio *bio, int sectors, 362 * bio_next_split - get next @sectors from a bio, splitting if necessary 364 * @sectors: number of sectors to split from the front of @bio 368 * Returns a bio representing the next @sectors of @bio - if the bio is smaller 369 * than @sectors, returns the original bio unchanged. 371 static inline struct bio *bio_next_split(struct bio *bio, int sectors, bio_next_split() argument 374 if (sectors >= bio_sectors(bio)) bio_next_split() 377 return bio_split(bio, sectors, gfp, bs); bio_next_split() 442 void generic_start_io_acct(int rw, unsigned long sectors, 757 unsigned int sectors) bio_integrity_trim() 756 bio_integrity_trim(struct bio *bio, unsigned int offset, unsigned int sectors) bio_integrity_trim() argument
|
H A D | genhd.h | 78 __le32 nr_sects; /* nr of sectors in partition */ 82 unsigned long sectors[2]; /* READs and WRITEs */ member in struct:disk_stats 501 __u32 d_nsectors; /* # of data sectors per track */ 504 __u32 d_secpercyl; /* # of data sectors per cylinder */ 505 __u32 d_secperunit; /* # of data sectors per unit */ 506 __u16 d_sparespertrack; /* # of spare sectors per track */ 507 __u16 d_sparespercyl; /* # of spare sectors per cylinder */ 528 __le32 p_size; /* number of sectors in partition */ 554 __le32 nr_sects; /* number of sectors in slice */ 564 __le32 d_nsectors; /* # of data sectors per track */ 572 __le32 d_phys_sec; /* # of physical sectors per track */
|
H A D | blk_types.h | 33 sectors */ 160 __REQ_DISCARD, /* request to discard sectors */
|
H A D | types.h | 124 * Linux always considers sectors to be 512 bytes long independently
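The types.h comment above states the convention behind most of these hits: the block layer's sector is always 512 bytes regardless of the device's real block size, which is why the code keeps shifting by 9 (bio.c's "sectors << 9", raid1.c's "r1_bio->sectors << 9", and so on). A two-line illustration of the conversion, with the shift constant defined locally:

    #include <stdio.h>

    #define SECTOR_SHIFT 9                  /* 512-byte sectors */

    int main(void)
    {
            unsigned long sectors = 8;
            unsigned long bytes = sectors << SECTOR_SHIFT;  /* 8 sectors -> 4096 bytes */

            printf("%lu sectors = %lu bytes, back again: %lu sectors\n",
                   sectors, bytes, bytes >> SECTOR_SHIFT);
            return 0;
    }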
|
H A D | ide.h | 97 * can have no more than 256 sectors, and since the typical blocksize is 98 * two or more sectors, we could get by with a limit of 128 entries here for 273 u8 nsect; /* 2: number of sectors */ 525 u8 sect; /* "real" sectors per track */ 527 u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */ 538 u64 capacity64; /* total number of sectors */ 719 int rqsize; /* max sectors per request */
|
H A D | pktcdvd.h | 30 __u32 size; /* packet size in (512 byte) sectors */
|
H A D | ata.h | 619 /* Offset of logical sectors relative to physical sectors. 625 * and updating "well aligned" (FS perspective) physical sectors on every 840 id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ ata_id_current_chs_valid()
|
H A D | blkdev.h | 873 * blk_rq_sectors() : sectors left in the entire request 874 * blk_rq_cur_sectors() : sectors left in the current segment 1313 /* Why are these in bytes, not sectors? */ queue_limit_discard_alignment() 1319 /* Offset of the partition start in 'granularity' sectors */ queue_limit_discard_alignment()
|
/linux-4.1.27/include/uapi/linux/ |
H A D | msdos_fs.h | 115 __u8 sec_per_clus; /* sectors/cluster */ 116 __le16 reserved; /* reserved sectors */ 119 __u8 sectors[2]; /* number of sectors */ member in struct:fat_boot_sector 121 __le16 fat_length; /* sectors/FAT */ 122 __le16 secs_track; /* sectors per track */ 124 __le32 hidden; /* hidden sectors (unused) */ 125 __le32 total_sect; /* number of sectors (if sectors == 0) */ 142 __le32 length; /* sectors/FAT */
|
H A D | efs_fs_sb.h | 26 __be32 fs_size; /* size of filesystem, in sectors */ 30 __be16 fs_sectors; /* sectors per track */
|
H A D | hdreg.h | 217 #define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ 218 #define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ 220 #define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers */ 221 #define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ 223 #define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ 225 #define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers */ 325 unsigned char sectors; member in struct:hd_geometry 405 unsigned short sectors; /* Obsolete, "physical" sectors per track */ member in struct:hd_driveid 439 unsigned short cur_sectors; /* Obsolete, l sectors per track */ 440 unsigned short cur_capacity0; /* Obsolete, l total sectors on drive */ 444 unsigned int lba_capacity; /* Obsolete, total number of sectors */ 594 unsigned long long lba_capacity_2;/* 48-bit total number of sectors */
|
H A D | bcache.h | 20 /* Btree keys - all units are in sectors */ 154 #define BDEV_DATA_START_DEFAULT 16 /* sectors */ 179 __u16 block_size; /* sectors */ 180 __u16 bucket_size; /* sectors */ 326 __u64 sectors; member in struct:uuid_entry::__anon13129::__anon13130
|
H A D | virtio_blk.h | 57 /* The capacity (in 512-byte sectors). */ 67 __u8 sectors; member in struct:virtio_blk_config::virtio_blk_geometry
|
H A D | fs.h | 125 #define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ 126 #define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
|
H A D | fd.h | 14 unsigned int size, /* nr of sectors total */ 15 sect, /* sectors per track */ 148 unsigned char interleave_sect; /* if there are more sectors, use
|
H A D | fdreg.h | 104 * pack more sectors on a track) */
|
/linux-4.1.27/fs/hpfs/ |
H A D | hpfs.h | 94 __le32 n_dir_band; /* number of sectors in dir band */ 99 __le32 user_id_table; /* 8 preallocated sectors - user id */ 106 /* The spare block has pointers to spare sectors. */ 156 __le32 hotfix_map; /* info about remapped bad sectors */ 173 /* The bad block list is 4 sectors long. The first word must be zero, 179 /* The hotfix map is 4 sectors long. It looks like 185 sectors. The from[] list contains the sector numbers of bad blocks 186 which have been remapped to corresponding sectors in the to[] list. 245 /* Free space bitmaps are 4 sectors long, which is 16384 bits. 246 16384 sectors is 8 meg, and each 8 meg band has a 4-sector bitmap. 249 Bit map sectors are marked allocated in the bit maps, and so are sectors 252 Band 0 is sectors 0-3fff, its map is in sectors 18-1b. 255 The remaining bands have maps in their first (even) or last (odd) 4 sectors 256 -- if the last, partial, band is odd its map is in its last 4 sectors. 262 The "directory band" is a bunch of sectors preallocated for dnodes. 265 the first 4 sectors of the directory band. The entire band is marked 271 /* dnode: directory. 4 sectors long */ 367 /* dnodes point to fnodes which are responsible for listing the sectors 379 __le32 length; /* length, sectors */ 385 __le32 file_secno; /* subtree maps sectors < this */ 430 entry and pointing to the file's sectors or directory's root dnode. EA's
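The hpfs.h comment above gives concrete figures: a free-space bitmap is 4 sectors long (16384 bits), so one bit per sector covers a band of 16384 sectors, i.e. 8 MiB. The arithmetic checks out; the snippet below only reproduces the numbers stated in that comment.

    #include <stdio.h>

    int main(void)
    {
            unsigned bitmap_sectors = 4;
            unsigned sector_bytes   = 512;
            unsigned bits = bitmap_sectors * sector_bytes * 8;      /* 16384 bits */
            unsigned long long band_bytes =
                    (unsigned long long)bits * sector_bytes;        /* one bit per sector */

            printf("bitmap covers %u sectors = %llu bytes (%llu MiB) per band\n",
                   bits, band_bytes, band_bytes >> 20);
            return 0;
    }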
|
H A D | ea.c | 22 ano ? "anode" : "sectors", a, len); hpfs_ea_ext_remove() 29 ano ? "anode" : "sectors", a, pos); hpfs_ea_ext_remove() 100 ano ? "anode" : "sectors", a, len); hpfs_read_ea() 158 ano ? "anode" : "sectors", a, len); hpfs_get_ea() 219 ano ? "anode" : "sectors", a, len); hpfs_set_ea()
|
H A D | buffer.c | 84 /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. */ 138 /* Don't read sectors */
|
H A D | alloc.c | 196 * 2) search bitmap where free sectors last found 199 * sectors 312 /* Free sectors in bitmaps */
|
H A D | hpfs_fn.h | 65 unsigned sb_fs_size; /* file system size, sectors */
|
/linux-4.1.27/block/ |
H A D | bio-integrity.c | 190 * @sectors: Size of the bio in 512-byte sectors 193 * sectors but integrity metadata is done in terms of the data integrity 194 * interval size of the storage device. Convert the block layer sectors 198 unsigned int sectors) bio_integrity_intervals() 200 return sectors >> (ilog2(bi->interval) - 9); bio_integrity_intervals() 204 unsigned int sectors) bio_integrity_bytes() 206 return bio_integrity_intervals(bi, sectors) * bi->tuple_size; bio_integrity_bytes() 424 * @sectors: number of data sectors 427 * The ivec will be advanced corresponding to 'offset' data sectors 429 * sectors. 432 unsigned int sectors) bio_integrity_trim() 438 bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors); bio_integrity_trim() 197 bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) bio_integrity_intervals() argument 203 bio_integrity_bytes(struct blk_integrity *bi, unsigned int sectors) bio_integrity_bytes() argument 431 bio_integrity_trim(struct bio *bio, unsigned int offset, unsigned int sectors) bio_integrity_trim() argument
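bio_integrity_intervals() above converts 512-byte sectors into protection-information intervals by shifting right by ilog2(interval) - 9, and bio_integrity_bytes() multiplies the result by the tuple size. A worked standalone version with assumed values (4096-byte interval, 8-byte tuple); ilog2u() is a local stand-in for the kernel's ilog2().

    #include <stdio.h>

    /* integer log2 for powers of two */
    static unsigned ilog2u(unsigned v)
    {
            unsigned r = 0;
            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned interval   = 4096;   /* bytes of data per PI tuple (assumed) */
            unsigned tuple_size = 8;      /* bytes of PI per interval (assumed) */
            unsigned sectors    = 32;     /* 16 KiB of data in 512-byte sectors */

            unsigned intervals = sectors >> (ilog2u(interval) - 9);
            unsigned pi_bytes  = intervals * tuple_size;

            /* 32 sectors of data -> 4 intervals -> 32 bytes of integrity metadata */
            printf("%u sectors -> %u intervals -> %u PI bytes\n",
                   sectors, intervals, pi_bytes);
            return 0;
    }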
|
H A D | blk-lib.c | 33 * @nr_sects: number of sectors to discard 38 * Issue a discard request for the sectors in question. 148 * @nr_sects: number of sectors to write 153 * Issue a write same request for the sectors in question. 222 * @nr_sects: number of sectors to write 284 * @nr_sects: number of sectors to write
|
H A D | blk-settings.c | 61 * Usually queues have static limitations on the max sectors or segments that 237 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request 239 * @max_hw_sectors: max hardware sectors in the usual 512b unit 265 * blk_queue_max_hw_sectors - set max sectors for a request for this queue 267 * @max_hw_sectors: max hardware sectors in the usual 512b unit 281 * @chunk_sectors: chunk sectors in the usual 512b unit 286 * must currently be a power-of-2 in sectors. Also note that the block 299 * blk_queue_max_discard_sectors - set max sectors for a single discard 301 * @max_discard_sectors: maximum number of sectors to discard 311 * blk_queue_max_write_same_sectors - set max sectors for a single write same 313 * @max_write_same_sectors: maximum number of sectors to write per command
|
H A D | bio.c | 1707 void generic_start_io_acct(int rw, unsigned long sectors, generic_start_io_acct() argument 1714 part_stat_add(cpu, part, sectors[rw], sectors); generic_start_io_acct() 1813 * @sectors: number of sectors to split from the front of @bio 1817 * Allocates and returns a new bio which represents @sectors from the start of 1818 * @bio, and updates @bio to represent the remaining sectors. 1824 struct bio *bio_split(struct bio *bio, int sectors, bio_split() argument 1829 BUG_ON(sectors <= 0); bio_split() 1830 BUG_ON(sectors >= bio_sectors(bio)); bio_split() 1844 split->bi_iter.bi_size = sectors << 9; bio_split() 1847 bio_integrity_trim(split, 0, sectors); bio_split() 1858 * @offset: number of sectors to trim from the front of @bio 1859 * @size: size we want to trim @bio to, in sectors
|
H A D | partition-generic.c | 127 (unsigned long long)part_stat_read(p, sectors[READ]), part_stat_show() 131 (unsigned long long)part_stat_read(p, sectors[WRITE]), part_stat_show()
|
H A D | cfq-iosched.c | 149 /* Number of sectors dispatched from queue in single dispatch round */ 187 /* total sectors transferred */ 188 struct blkg_stat sectors; member in struct:cfqg_stats 189 /* total disk time and nr sectors dispatched by this group */ 674 blkg_stat_add(&cfqg->stats.sectors, bytes >> 9); cfqg_stats_update_dispatch() 1100 * by definition, 1KiB is 2 sectors cfq_choose_req() 1533 blkg_stat_init(&stats->sectors); cfqg_stats_init() 1894 .name = "sectors", 1895 .private = offsetof(struct cfq_group, stats.sectors), 1937 .private = offsetof(struct cfq_group, stats.sectors),
|
/linux-4.1.27/block/partitions/ |
H A D | ldm.h | 79 #define LDM_DB_SIZE 2048 /* Size in sectors (= 1MiB). */ 83 device in sectors */ 85 /* Offsets to structures within the LDM Database in sectors. */ 117 struct privhead { /* Offsets and sizes are in sectors. */ 163 u64 size; /* start, size and vol_off in sectors */
|
H A D | ultrix.c | 21 s32 pi_nblocks; /* no. of sectors */ ultrix_partition()
|
H A D | ibm.c | 40 return cyl * geo->heads * geo->sectors + cchh2blk() 41 head * geo->sectors; cchh2blk() 58 return cyl * geo->heads * geo->sectors + cchhb2blk() 59 head * geo->sectors + cchhb2blk() 176 offset + geo->sectors; find_vol1_partitions() 221 * geo->sectors * secperblk; find_lnx1_partitions()
|
H A D | sun.c | 36 __be32 write_reinstruct; /* sectors to skip, writes */ sun_partition() 37 __be32 read_reinstruct; /* sectors to skip, reads */ sun_partition()
|
H A D | aix.c | 76 * physical sectors available on the disk.
|
/linux-4.1.27/drivers/mtd/ |
H A D | ssfdc.c | 26 unsigned char sectors; member in struct:ssfdcr_record 321 ssfdc->sectors = 32; ssfdcr_add_mtd() 322 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); ssfdcr_add_mtd() 324 ((long)ssfdc->sectors * (long)ssfdc->heads)); ssfdcr_add_mtd() 327 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, ssfdcr_add_mtd() 329 (long)ssfdc->sectors); ssfdcr_add_mtd() 332 (long)ssfdc->sectors; ssfdcr_add_mtd() 414 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); ssfdcr_getgeo() 417 geo->sectors = ssfdc->sectors; ssfdcr_getgeo()
|
H A D | sm_ftl.h | 25 struct kfifo free_sectors; /* queue of free sectors */ 61 int sectors; member in struct:sm_ftl
|
H A D | rfd_ftl.c | 75 u_int header_sectors_per_block; /* header sectors in erase unit */ 76 u_int data_sectors_per_block; /* data sectors in erase unit */ 77 u_int sector_count; /* sectors in translated disk */ 468 * more removed sectors is more efficient (have to move reclaim_block() 500 "%d free sectors\n", best_block, reclaim_block() 513 * IMPROVE: It would be best to choose the block with the most deleted sectors, 515 * the least live sectors at reclaim. 751 geo->sectors = SECTORS_PER_TRACK; rfd_ftl_getgeo()
|
H A D | inftlcore.c | 91 inftl->sectors = inftl->mbd.size / temp; inftl_add_mtd() 93 inftl->sectors++; inftl_add_mtd() 94 temp = inftl->cylinders * inftl->sectors; inftl_add_mtd() 99 temp = inftl->heads * inftl->sectors; inftl_add_mtd() 104 if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { inftl_add_mtd() 107 mbd.size == heads * cylinders * sectors inftl_add_mtd() 113 inftl->cylinders, inftl->heads , inftl->sectors, inftl_add_mtd() 115 (long)inftl->sectors ); inftl_add_mtd() 934 geo->sectors = inftl->sectors; inftl_getgeo()
|
H A D | nftlcore.c | 84 nftl->sectors = nftl->mbd.size / temp; nftl_add_mtd() 86 nftl->sectors++; nftl_add_mtd() 87 temp = nftl->cylinders * nftl->sectors; nftl_add_mtd() 92 temp = nftl->heads * nftl->sectors; nftl_add_mtd() 97 if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) { nftl_add_mtd() 100 mbd.size == heads * cylinders * sectors nftl_add_mtd() 106 nftl->cylinders, nftl->heads , nftl->sectors, nftl_add_mtd() 108 (long)nftl->sectors ); nftl_add_mtd() 785 geo->sectors = nftl->sectors; nftl_getgeo()
|
H A D | mtdblock.c | 51 * Since typical flash erasable sectors are much larger than what Linux's 53 * sectors for each block write requests. To avoid over-erasing flash sectors
|
H A D | sm_ftl.c | 653 ftl->sectors = chs_table[i].sec; sm_get_media_info() 661 ftl->sectors = 63; sm_get_media_info() 777 /* Allocate memory for free sectors FIFO */ sm_init_zone() 857 /* No free sectors, means that the zone is heavily damaged, write won't sm_init_zone() 969 dbg("no free sectors for write!"); sm_cache_flush() 1123 geo->sectors = ftl->sectors; sm_getgeo()
|
H A D | ftl.c | 783 Read a series of sectors from an FTL partition. 829 Write a series of sectors to an FTL partition 990 geo->sectors = 8; ftl_getgeo() 1014 pr_debug("FTL erase sector %ld for %d sectors\n", ftl_discardsect()
|
/linux-4.1.27/include/linux/spi/ |
H A D | flash.h | 18 * Note that for DataFlash, sizes for pages, blocks, and sectors are
|
/linux-4.1.27/fs/hfsplus/ |
H A D | btree.c | 75 u64 sectors, int file_id) hfsplus_calc_btree_clump_size() 99 if (sectors < 0x200000) { hfsplus_calc_btree_clump_size() 100 clump_size = sectors << 2; /* 0.8 % */ hfsplus_calc_btree_clump_size() 105 for (i = 0, sectors = sectors >> 22; hfsplus_calc_btree_clump_size() 106 sectors && (i < CLUMP_ENTRIES - 1); hfsplus_calc_btree_clump_size() 107 ++i, sectors = sectors >> 1) { hfsplus_calc_btree_clump_size() 74 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors, int file_id) hfsplus_calc_btree_clump_size() argument
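
hfsplus_calc_btree_clump_size() sizes the clump at sectors << 2 bytes for small volumes, i.e. 4 bytes per 512-byte sector, which is the "0.8 %" noted in the source comment. A quick standalone check of that figure (the large-volume CLUMP_ENTRIES table lookup is not reproduced here):

#include <stdio.h>

int main(void)
{
        unsigned long long sectors = 0x100000;          /* 512 MiB volume   */
        unsigned long long clump   = sectors << 2;      /* clump, in bytes  */
        double pct = 100.0 * clump / (sectors * 512.0);

        printf("clump %llu bytes = %.2f%% of the volume\n", clump, pct);
        return 0;
}
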
|
/linux-4.1.27/include/uapi/linux/raid/ |
H A D | md_p.h | 231 __le64 size; /* used size of component devices, in 512byte sectors */ 233 __le32 chunksize; /* in 512byte sectors */ 235 __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts 245 __le32 new_chunk; /* new chunk size (512byte sectors) */ 253 __le64 data_size; /* sectors in this device that can be used for data */ 255 __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ 264 __u8 bblog_shift; /* shift from sectors to block size */ 265 __le16 bblog_size; /* number of sectors reserved for list */
|
/linux-4.1.27/drivers/md/bcache/ |
H A D | alloc.c | 84 void bch_rescale_priorities(struct cache_set *c, int sectors) bch_rescale_priorities() argument 92 atomic_sub(sectors, &c->rescale); bch_rescale_priorities() 161 * first: we also take into account the number of sectors of live data in that 566 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many 567 * sectors were actually allocated. 571 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, bch_alloc_sectors() argument 617 sectors = min(sectors, b->sectors_free); bch_alloc_sectors() 619 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); bch_alloc_sectors() 620 SET_KEY_SIZE(k, sectors); bch_alloc_sectors() 631 b->sectors_free -= sectors; bch_alloc_sectors() 634 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); bch_alloc_sectors() 636 atomic_long_add(sectors, bch_alloc_sectors()
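
The bch_alloc_sectors() comment above notes that the call may allocate fewer sectors than requested, clamping to the open bucket's sectors_free and reporting the actual size back to the caller (via KEY_SIZE). A rough standalone model of that partial-allocation contract; toy_bucket and toy_alloc_sectors are invented names, not bcache's API:

#include <stdio.h>

struct toy_bucket { unsigned sectors_free; };

/* Returns how many sectors were actually carved out of the bucket. */
static unsigned toy_alloc_sectors(struct toy_bucket *b, unsigned want)
{
        unsigned got = want < b->sectors_free ? want : b->sectors_free;

        b->sectors_free -= got;       /* like b->sectors_free -= sectors  */
        return got;                   /* caller's KEY_SIZE() equivalent   */
}

int main(void)
{
        struct toy_bucket b = { .sectors_free = 96 };
        unsigned got;

        /* First request fits, second is truncated to what is left. */
        got = toy_alloc_sectors(&b, 64);
        printf("asked 64, got %u (free now %u)\n", got, b.sectors_free);
        got = toy_alloc_sectors(&b, 64);
        printf("asked 64, got %u (free now %u)\n", got, b.sectors_free);
        return 0;
}
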
|
H A D | stats.c | 221 int sectors) bch_mark_sectors_bypassed() 223 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); bch_mark_sectors_bypassed() 224 atomic_add(sectors, &c->accounting.collector.sectors_bypassed); bch_mark_sectors_bypassed() 220 bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, int sectors) bch_mark_sectors_bypassed() argument
|
H A D | request.c | 120 pr_debug("invalidating %i sectors from %llu", bch_data_invalidate() 124 unsigned sectors = min(bio_sectors(bio), bch_data_invalidate() local 130 bio->bi_iter.bi_sector += sectors; bch_data_invalidate() 131 bio->bi_iter.bi_size -= sectors << 9; bch_data_invalidate() 134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); bch_data_invalidate() 369 unsigned sectors, congested = bch_get_congested(c); check_should_bypass() local 430 sectors = max(task->sequential_io, check_should_bypass() 434 sectors >= dc->sequential_cutoff >> 9) { check_should_bypass() 439 if (congested && sectors >= congested) { check_should_bypass() 515 unsigned sectors = KEY_INODE(k) == s->iop.inode cache_lookup_fn() local 520 int ret = s->d->cache_miss(b, s, bio, sectors); cache_lookup_fn() 525 BUG_ON(bio_sectors <= sectors); cache_lookup_fn() 777 struct bio *bio, unsigned sectors) cached_dev_cache_miss() 785 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); cached_dev_cache_miss() 796 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); cached_dev_cache_miss() 808 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); cached_dev_cache_miss() 1038 struct bio *bio, unsigned sectors) flash_dev_cache_miss() 1040 unsigned bytes = min(sectors, bio_sectors(bio)) << 9; flash_dev_cache_miss() 776 cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) cached_dev_cache_miss() argument 1037 flash_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) flash_dev_cache_miss() argument
|
H A D | journal.c | 586 unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * variable 622 atomic_long_add(sectors, &ca->meta_sectors_written); 628 bio->bi_iter.bi_size = sectors << 9; 637 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors); 681 size_t sectors; journal_wait_for_write() local 692 sectors = __set_blocks(w->data, w->data->keys + nkeys, journal_wait_for_write() 695 if (sectors <= min_t(size_t, journal_wait_for_write()
|
H A D | writeback.c | 40 /* Scale to sectors per second */ __update_writeback_rate() 90 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) writeback_delay() argument 96 return bch_next_delay(&dc->writeback_rate, sectors); writeback_delay()
|
H A D | super.c | 433 u1[i].sectors = 0; uuid_read() 778 sector_t sectors) bcache_device_init() 787 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); bcache_device_init() 821 set_capacity(d->disk, sectors); bcache_device_init() 859 uint64_t sectors = 0; calc_cached_dev_sectors() local 863 sectors += bdev_sectors(dc->bdev); calc_cached_dev_sectors() 865 c->cached_dev_sectors = sectors; calc_cached_dev_sectors() 1257 if (bcache_device_init(d, block_bytes(c), u->sectors)) flash_dev_run() 1310 u->sectors = size >> 9; bch_flash_dev_create() 777 bcache_device_init(struct bcache_device *d, unsigned block_size, sector_t sectors) bcache_device_init() argument
|
H A D | sysfs.c | 348 sysfs_hprint(size, u->sectors << 9); SHOW() 372 u->sectors = v >> 9; STORE() 374 set_capacity(d->disk, u->sectors); STORE()
|
H A D | bcache.h | 466 uint64_t data; /* sectors */ 523 /* log2(bucket_size), in sectors */ 526 /* log2(block_size), in sectors */ 581 * For any bio we don't skip we subtract the number of sectors from 611 /* Counts how many sectors bio_insert has added to the cache */
|
H A D | extents.c | 314 int sectors) bch_subtract_dirty() 318 offset, -sectors); bch_subtract_dirty() 311 bch_subtract_dirty(struct bkey *k, struct cache_set *c, uint64_t offset, int sectors) bch_subtract_dirty() argument
|
H A D | bset.h | 33 * We also implement functions here for removing some number of sectors from the 35 * extents, by removing the overlapping sectors from the older key.
|
/linux-4.1.27/drivers/ps3/ |
H A D | ps3stor_lib.c | 270 * @sectors: Number of sectors to read/write 277 u64 start_sector, u64 sectors, int write) ps3stor_read_write_sectors() 283 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", ps3stor_read_write_sectors() 284 __func__, __LINE__, op, sectors, start_sector); ps3stor_read_write_sectors() 288 start_sector, sectors, 0, lpar, ps3stor_read_write_sectors() 291 start_sector, sectors, 0, lpar, ps3stor_read_write_sectors() 276 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar, u64 start_sector, u64 sectors, int write) ps3stor_read_write_sectors() argument
|
/linux-4.1.27/drivers/usb/storage/ |
H A D | datafab.c | 68 unsigned long sectors; /* total sector count */ member in struct:datafab_info 146 u32 sectors) datafab_read_data() 161 if (sectors > 0x0FFFFFFF) datafab_read_data() 170 totallen = sectors * info->ssize; datafab_read_data() 229 u32 sectors) datafab_write_data() 245 if (sectors > 0x0FFFFFFF) datafab_write_data() 254 totallen = sectors * info->ssize; datafab_write_data() 429 info->sectors = ((u32)(reply[117]) << 24) | datafab_id_device() 585 info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec datafab_transport() 590 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", datafab_transport() 591 info->sectors, info->ssize); datafab_transport() 594 // we need the last sector, not the number of sectors datafab_transport() 595 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); datafab_transport() 143 datafab_read_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_read_data() argument 226 datafab_write_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_write_data() argument
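
The datafab hits highlight a common READ CAPACITY(10) detail: the reply carries the last sector number (count - 1) plus the sector size, both big-endian. A hedged standalone sketch of that reply layout; put_be32() below stands in for the kernel's cpu_to_be32(), and the numbers are example values:

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint8_t *p, uint32_t v)
{
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
        uint32_t sectors = 1015808;     /* example total sector count    */
        uint32_t ssize   = 0x200;       /* 512-byte sectors, per ATA     */
        uint8_t reply[8];

        put_be32(reply,     sectors - 1);   /* last LBA, not the count   */
        put_be32(reply + 4, ssize);         /* bytes per sector          */

        printf("last LBA %u, block size %u => %llu bytes total\n",
               sectors - 1, ssize, (unsigned long long)sectors * ssize);
        return 0;
}
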
|
H A D | jumpshot.c | 105 unsigned long sectors; /* total sector count */ member in struct:jumpshot_info 165 u32 sectors) jumpshot_read_data() 183 totallen = sectors * info->ssize; jumpshot_read_data() 242 u32 sectors) jumpshot_write_data() 260 totallen = sectors * info->ssize; jumpshot_write_data() 363 info->sectors = ((u32)(reply[117]) << 24) | jumpshot_id_device() 511 info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec jumpshot_transport() 521 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", jumpshot_transport() 522 info->sectors, info->ssize); jumpshot_transport() 526 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); jumpshot_transport() 162 jumpshot_read_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) jumpshot_read_data() argument 239 jumpshot_write_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) jumpshot_write_data() argument
|
H A D | sddr55.c | 197 unsigned short sectors) { sddr55_read_data() 216 len = min((unsigned int) sectors, (unsigned int) info->blocksize >> sddr55_read_data() 224 while (sectors>0) { sddr55_read_data() 232 // Read as many sectors as possible in this block sddr55_read_data() 234 pages = min((unsigned int) sectors << info->smallpageshift, sddr55_read_data() 302 sectors -= pages >> info->smallpageshift; sddr55_read_data() 316 unsigned short sectors) { sddr55_write_data() 343 len = min((unsigned int) sectors, (unsigned int) info->blocksize >> sddr55_write_data() 351 while (sectors > 0) { sddr55_write_data() 359 // Write as many sectors as possible in this block sddr55_write_data() 361 pages = min((unsigned int) sectors << info->smallpageshift, sddr55_write_data() 510 sectors -= pages >> info->smallpageshift; sddr55_write_data() 194 sddr55_read_data(struct us_data *us, unsigned int lba, unsigned int page, unsigned short sectors) sddr55_read_data() argument 313 sddr55_write_data(struct us_data *us, unsigned int lba, unsigned int page, unsigned short sectors) sddr55_write_data() argument
|
H A D | alauda.c | 914 unsigned int sectors) alauda_read_data() 935 len = min(sectors, blocksize) * (pagesize + 64); alauda_read_data() 951 while (sectors > 0) { alauda_read_data() 967 pages = min(sectors, blocksize - page); alauda_read_data() 998 sectors -= pages; alauda_read_data() 1009 unsigned int sectors) alauda_write_data() 1027 len = min(sectors, blocksize) * pagesize; alauda_write_data() 1054 while (sectors > 0) { alauda_write_data() 1055 /* Write as many sectors as possible in this block */ alauda_write_data() 1056 unsigned int pages = min(sectors, blocksize - page); alauda_write_data() 1078 sectors -= pages; alauda_write_data() 913 alauda_read_data(struct us_data *us, unsigned long address, unsigned int sectors) alauda_read_data() argument 1008 alauda_write_data(struct us_data *us, unsigned long address, unsigned int sectors) alauda_write_data() argument
|
H A D | shuttle_usbat.c | 143 unsigned long sectors; /* total sector count */ member in struct:usbat_info 203 * Convenience function to produce an ATA read/write sectors command 1099 info->sectors = ((u32)(reply[117]) << 24) | usbat_flash_get_sector_count() 1117 u32 sectors) usbat_flash_read_data() 1150 totallen = sectors * info->ssize; usbat_flash_read_data() 1208 u32 sectors) usbat_flash_write_data() 1241 totallen = sectors * info->ssize; usbat_flash_write_data() 1369 /* Fix up the SCSI command sector and num sectors */ usbat_hp8200e_handle_read10() 1378 data[7+8] = LSB_of(len / srb->transfersize); /* num sectors */ usbat_hp8200e_handle_read10() 1712 /* hard coded 512 byte sectors as per ATA spec */ usbat_flash_transport() 1714 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", usbat_flash_transport() 1715 info->sectors, info->ssize); usbat_flash_transport() 1720 * *not* the total number of sectors usbat_flash_transport() 1722 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); usbat_flash_transport() 1114 usbat_flash_read_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) usbat_flash_read_data() argument 1205 usbat_flash_write_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) usbat_flash_write_data() argument
|
H A D | sddr09.c | 572 * bytes 10-11: sector count (big-endian, in 512-byte sectors). 742 unsigned int sectors) { sddr09_read_data() 763 len = min(sectors, (unsigned int) info->blocksize) * info->pagesize; sddr09_read_data() 777 while (sectors > 0) { sddr09_read_data() 780 pages = min(sectors, info->blocksize - page); sddr09_read_data() 825 sectors -= pages; sddr09_read_data() 968 unsigned int sectors) { sddr09_write_data() 1004 len = min(sectors, (unsigned int) info->blocksize) * info->pagesize; sddr09_write_data() 1016 while (sectors > 0) { sddr09_write_data() 1018 // Write as many sectors as possible in this block sddr09_write_data() 1020 pages = min(sectors, info->blocksize - page); sddr09_write_data() 1042 sectors -= pages; sddr09_write_data() 740 sddr09_read_data(struct us_data *us, unsigned long address, unsigned int sectors) sddr09_read_data() argument 966 sddr09_write_data(struct us_data *us, unsigned long address, unsigned int sectors) sddr09_write_data() argument
|
H A D | scsiglue.c | 97 * hardware sectors that are multiples of 512 bytes in length, slave_alloc() 155 * of sectors, we will always enable the CAPACITY_HEURISTICS slave_configure()
|
/linux-4.1.27/arch/ia64/hp/sim/ |
H A D | simscsi.c | 118 ip[1] = 32; /* sectors */ simscsi_biosparam() 171 size_t bit, sectors = 0; simscsi_get_disk_size() local 183 ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ); simscsi_get_disk_size() 187 sectors |= bit; simscsi_get_disk_size() 189 return sectors - 1; /* return last valid sector number */ simscsi_get_disk_size()
|
/linux-4.1.27/drivers/block/ |
H A D | ps3disk.c | 104 dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n", rq_for_each_segment() 127 u64 start_sector, sectors; ps3disk_submit_request_sg() local 138 "%s:%u: %s req has %u bvecs for %u sectors\n", ps3disk_submit_request_sg() 143 sectors = blk_rq_sectors(req) * priv->blocking_factor; ps3disk_submit_request_sg() 144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", ps3disk_submit_request_sg() 145 __func__, __LINE__, op, sectors, start_sector); ps3disk_submit_request_sg() 151 start_sector, sectors, 0, ps3disk_submit_request_sg() 155 start_sector, sectors, 0, ps3disk_submit_request_sg()
|
H A D | cpqarray.h | 52 unsigned sectors; member in struct:__anon3565
|
H A D | mg_disk.c | 123 u16 sectors; member in struct:mg_host 376 host->sectors = id[ATA_ID_SECTORS]; mg_get_disk_id() 378 if (MG_RES_SEC && host->heads && host->sectors) { mg_get_disk_id() 381 host->heads / host->sectors; mg_get_disk_id() 383 host->heads * host->sectors; mg_get_disk_id() 393 printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n", mg_get_disk_id() 775 geo->sectors = (unsigned char)host->sectors; mg_getgeo()
|
H A D | xen-blkfront.c | 341 hg->sectors = 0x3f; blkif_getgeo() 342 sector_div(cylinders, hg->heads * hg->sectors); blkif_getgeo() 344 if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) blkif_getgeo() 676 /* Hard sector size and max sectors impersonate the equiv. hardware. */ xlvbd_init_blk_queue() 1765 * the details about the physical device - #sectors, size, etc). 1769 unsigned long long sectors; blkfront_connect() local 1783 "sectors", "%Lu", §ors); blkfront_connect() 1787 sectors); blkfront_connect() 1788 set_capacity(info->gd, sectors); blkfront_connect() 1810 "sectors", "%llu", §ors, blkfront_connect() 1879 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, blkfront_connect()
|
H A D | hd.c | 62 #define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ 624 printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", hd_request() 666 geo->sectors = disk->sect; hd_getgeo() 730 printk("hd: no drives specified - use hd=cyl,head,sectors" hd_init()
|
H A D | umem.c | 61 #define MM_RAHEAD 2 /* two sectors */ 63 #define MM_HARDSECT 512 /* 512-byte hardware sectors */ 106 unsigned int init_size; /* initial segment, in sectors, 774 * multiple of 2048 (1M): tell we have 32 sectors, 64 heads, mm_getgeo() 778 geo->sectors = 32; mm_getgeo() 779 geo->cylinders = size / (geo->heads * geo->sectors); mm_getgeo()
|
H A D | floppy.c | 87 * by defining bit 1 of the "stretch" parameter to mean put sectors on the 379 | | | | | | | | | | | | Max nonintlv. sectors 422 * tells if the disk is in Commodore 1581 format, which means side 0 sectors 2138 /* place logical sectors */ setup_format_params() 2221 /* new request_done. Can handle physical sectors which are smaller than a 2405 int remaining; /* number of transferred 512-byte sectors */ copy_buffer() 2499 pr_info("too many sectors %d > %d\n", virtualdmabug_workaround() 2564 /* 2M disks have phantom sectors on the first track */ make_raw_rw_request() 2592 /* tracksize describes the size which can be filled up with sectors make_raw_rw_request() 2601 /* if we are beyond tracksize, fill up using smaller sectors */ make_raw_rw_request() 2632 unsigned int sectors; make_raw_rw_request() local 2634 sectors = fsector_t + blk_rq_sectors(current_req); make_raw_rw_request() 2635 if (sectors > ssize && sectors < ssize + ssize) make_raw_rw_request() 2671 * are other bad sectors on this track. make_raw_rw_request() 2781 DPRINT("more sectors than bytes\n"); make_raw_rw_request() 2783 pr_info("sectors=%ld\n", current_count_sectors); make_raw_rw_request() 3285 * when there are already sectors in the buffer cache set_geometry() 3376 geo->sectors = g->sect; fd_getgeo()
|
H A D | virtio_blk.c | 289 geometry.sectors, &geo->sectors); virtblk_getgeo() 293 geo->sectors = 1 << 5; virtblk_getgeo()
|
H A D | ataflop.c | 23 * - After errors in multiple read mode try again reading single sectors 91 unsigned spt; /* sectors per track */ 99 { "D820", 10,1640, 0, 0}, /* 3: DD disk with 82 tracks/10 sectors */ 741 /* all sectors finished */ do_fd_action() 936 /* How many sectors for DMA */ fd_rwsec() 956 * then check if all sectors are read. The FDC will even fd_rwsec() 1146 /* all sectors finished */ fd_rwsec_done1() 1201 /* How many sectors for DMA */ fd_writetrack()
|
H A D | sunvdc.c | 117 geo->sectors = 0x3f; vdc_getgeo() 118 sector_div(cylinders, geo->heads * geo->sectors); vdc_getgeo() 120 if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) vdc_getgeo() 833 pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n", probe_disk()
|
H A D | cpqarray.c | 951 DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); ); do_ida_request() 1129 geo->sectors = drv->sectors; ida_getgeo() 1133 geo->sectors = 0x3f; ida_getgeo() 1761 drv->sectors = id_ldrive->drv.sect_per_track; getgeometry()
|
H A D | cciss.h | 39 int sectors; member in struct:_drive_info_struct
|
H A D | cciss.c | 1270 geo->sectors = drv->sectors; cciss_getgeo() 2044 drvinfo->sectors == h->drv[drv_index]->sectors && cciss_update_drive_info() 2085 h->drv[drv_index]->sectors = drvinfo->sectors; cciss_update_drive_info() 2255 h->drv[drv_index]->sectors = 0; cciss_add_controller_node() 2417 drive_info->sectors = 0; cciss_clear_drive_info() 2860 drv->sectors = 32; /* Sectors per track */ cciss_geometry_inquiry() 2865 drv->sectors = inq_buff->data_byte[7]; cciss_geometry_inquiry() 2872 t = drv->heads * drv->sectors; cciss_geometry_inquiry() 3378 dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " do_cciss_request()
|
H A D | swim3.c | 80 REG(nsect); /* # sectors to read or write */ 190 int scount; /* # sectors we're transferring at present */ 458 swim3_warn("%s", "Transfer 0 sectors ?\n"); setup_transfer()
|
/linux-4.1.27/drivers/mmc/card/ |
H A D | mmc_test.c | 90 * @count: amount of group of sectors to check 91 * @sectors: amount of sectors to check in one group 99 unsigned int sectors; member in struct:mmc_test_transfer_result 523 unsigned int count, unsigned int sectors, struct timespec ts, mmc_test_save_transfer_result() 536 tr->sectors = sectors; mmc_test_save_transfer_result() 550 unsigned int rate, iops, sectors = bytes >> 9; mmc_test_print_rate() local 558 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " mmc_test_print_rate() 560 mmc_hostname(test->card->host), sectors, sectors >> 1, mmc_test_print_rate() 561 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, mmc_test_print_rate() 565 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); mmc_test_print_rate() 575 unsigned int rate, iops, sectors = bytes >> 9; mmc_test_print_avg_rate() local 584 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " mmc_test_print_avg_rate() 587 mmc_hostname(test->card->host), count, sectors, count, mmc_test_print_avg_rate() 588 sectors >> 1, (sectors & 1 ? ".5" : ""), mmc_test_print_avg_rate() 593 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); mmc_test_print_avg_rate() 597 * Return the card size in sectors. 602 return card->ext_csd.sectors; mmc_test_capacity() 612 * Fill the first couple of sectors of the card with known data 929 int sectors; mmc_test_transfer() local 935 sectors = (blocks * blksz + 511) / 512; mmc_test_transfer() 936 if ((sectors * 512) == (blocks * blksz)) mmc_test_transfer() 937 sectors++; mmc_test_transfer() 939 if ((sectors * 512) > BUFFER_SIZE) mmc_test_transfer() 942 memset(test->buffer, 0, sectors * 512); mmc_test_transfer() 944 for (i = 0;i < sectors;i++) { mmc_test_transfer() 957 for (;i < sectors * 512;i++) { mmc_test_transfer() 2827 tr->count, tr->sectors, mtf_test_show() 522 mmc_test_save_transfer_result(struct mmc_test_card *test, unsigned int count, unsigned int sectors, struct timespec ts, unsigned int rate, unsigned int iops) mmc_test_save_transfer_result() argument
|
H A D | block.c | 329 geo->sectors = 16; mmc_blk_getgeo() 1346 pr_err("%s: packed cmd failed, nr %u, sectors %u, " mmc_blk_packed_err_check() 1405 * sectors can be read successfully. mmc_blk_rw_rq_prep() 1738 * mark the known good sectors as ok. mmc_blk_cmd_err() 1740 * If the card is not SD, we can still ok written sectors mmc_blk_cmd_err() 1742 * the real number of written sectors, but never more). mmc_blk_cmd_err() 2216 * sectors. mmc_blk_alloc() 2218 size = card->ext_csd.sectors; mmc_blk_alloc()
|
/linux-4.1.27/drivers/block/rsxx/ |
H A D | dev.c | 51 MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO"); 96 geo->sectors = 16; rsxx_getgeo() 97 do_div(blocks, (geo->heads * geo->sectors)); rsxx_getgeo() 101 geo->sectors = 0; rsxx_getgeo()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | ps3stor.h | 66 u64 start_sector, u64 sectors,
|
/linux-4.1.27/include/linux/mtd/ |
H A D | inftl.h | 34 unsigned char sectors; member in struct:INFTLrecord
|
H A D | nftl.h | 41 unsigned char sectors; member in struct:NFTLrecord
|
/linux-4.1.27/drivers/block/paride/ |
H A D | pd.c | 44 a logical geometry with 64 heads and 32 sectors 77 byte sectors) is set with this parameter. 227 int capacity; /* Size of this volume in sectors */ 229 int sectors; member in struct:pd_unit 341 s = (block % disk->sectors) + 1; pd_ide_command() 342 h = (block /= disk->sectors) % disk->heads; pd_ide_command() 439 static int pd_run; /* sectors in current cluster */ 590 pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0, pd_init_dev_parms() 672 disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12)); pd_identify() 678 disk->capacity = disk->sectors * disk->heads * disk->cylinders; pd_identify() 694 disk->cylinders, disk->heads, disk->sectors, pd_identify() 760 geo->sectors = PD_LOG_SECTS; pd_getgeo() 761 geo->cylinders = disk->capacity / (geo->heads * geo->sectors); pd_getgeo() 764 geo->sectors = disk->sectors; pd_getgeo()
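
pd_ide_command() decomposes a logical block into CHS using the driver's 64-head/32-sector logical geometry: sector = block % sectors + 1, then head and cylinder from the successive quotients. A standalone version of that arithmetic (the final cylinder division is the natural continuation and is not itself visible in the excerpt):

#include <stdio.h>

int main(void)
{
        const unsigned long lba = 123456;        /* example logical block      */
        const unsigned sectors = 32, heads = 64; /* pd.c's logical geometry    */
        unsigned long block = lba;
        unsigned s, h, c;

        s = (unsigned)(block % sectors) + 1;     /* sector numbers start at 1  */
        block /= sectors;
        h = (unsigned)(block % heads);
        c = (unsigned)(block / heads);           /* what remains is the cylinder */

        printf("LBA %lu -> C/H/S %u/%u/%u\n", lba, c, h, s);
        return 0;
}
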
|
H A D | pf.c | 75 byte sectors) is set with this parameter. 262 static int pf_run; /* sectors in current cluster */ 337 geo->sectors = PF_FD_SPT; pf_getgeo() 341 geo->sectors = PF_HD_SPT; pf_getgeo()
|
/linux-4.1.27/drivers/scsi/aacraid/ |
H A D | linit.c | 296 * The default disk geometry is 64 heads, 32 sectors, and the appropriate 302 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive 303 * are given a disk geometry of 255 heads and 63 sectors. However, if 321 if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */ aac_biosparm() 322 if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */ aac_biosparm() 324 param->sectors = 63; aac_biosparm() 327 param->sectors = 32; aac_biosparm() 331 param->sectors = 32; aac_biosparm() 334 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); aac_biosparm() 358 param->sectors = 32; aac_biosparm() 362 param->sectors = 32; aac_biosparm() 366 param->sectors = 63; aac_biosparm() 377 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); aac_biosparm() 378 if (num < 4 && end_sec == param->sectors) { aac_biosparm() 380 dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n", aac_biosparm() 381 param->heads, param->sectors, num)); aac_biosparm() 383 dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n", aac_biosparm() 385 dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n", aac_biosparm() 386 param->heads, param->sectors)); aac_biosparm()
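
The aac_biosparm() comments spell out a size-based default geometry: 64 heads/32 sectors for small drives, 128/32 above 1 GB, 255/63 from 2 GB up, with cylinders derived from whatever capacity remains. A compact sketch of that heuristic, modelling the driver's cap_to_cyls() as a plain division:

#include <stdio.h>

static void pick_geometry(unsigned long long capacity,   /* in 512B sectors  */
                          unsigned *heads, unsigned *sectors,
                          unsigned long long *cylinders)
{
        if (capacity >= 4ULL * 1024 * 1024) {        /* 2 GB and above       */
                *heads = 255; *sectors = 63;
        } else if (capacity >= 2ULL * 1024 * 1024) { /* above 1 GB           */
                *heads = 128; *sectors = 32;
        } else {                                     /* small drives         */
                *heads = 64;  *sectors = 32;
        }
        *cylinders = capacity / (*heads * *sectors);
}

int main(void)
{
        unsigned long long caps[] = { 1000000ULL, 3000000ULL, 50000000ULL };

        for (int i = 0; i < 3; i++) {
                unsigned h, s;
                unsigned long long c;

                pick_geometry(caps[i], &h, &s, &c);
                printf("%llu sectors -> heads=%u sectors=%u cylinders=%llu\n",
                       caps[i], h, s, c);
        }
        return 0;
}
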
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | sgiarcs.h | 333 unsigned char sect_clust; /* sectors per cluster */ 334 unsigned short sect_resv; /* reserved sectors */ 337 unsigned short sect_volume; /* sectors in volume */ 339 unsigned short sect_fat; /* sectors per allocation table */ 340 unsigned short sect_track; /* sectors per track */ 342 unsigned short nhsects; /* # of hidden sectors */
|
/linux-4.1.27/include/linux/mmc/ |
H A D | card.h | 38 unsigned int erase_size; /* In sectors */ 72 unsigned int sectors; member in struct:mmc_ext_csd 73 unsigned int hc_erase_size; /* In sectors */ 137 unsigned int au; /* In sectors */ 282 unsigned int erase_size; /* erase size in sectors */ 284 unsigned int pref_erase; /* in sectors */
|
/linux-4.1.27/drivers/ide/ |
H A D | ide-disk.c | 79 * using LBA if supported, or CHS otherwise, to address sectors. 191 pr_debug("%s: %sing: block=%llu, sectors=%u\n", ide_do_rw_disk() 287 * Some disks report total number of sectors instead of 339 "\tcurrent capacity is %llu sectors (%llu MB)\n" idedisk_check_hpa() 340 "\tnative capacity is %llu sectors (%llu MB)\n", idedisk_check_hpa() 390 "%llu sectors (%llu MB)\n", ide_disk_get_capacity() 400 " will be used for accessing sectors " ide_disk_get_capacity() 725 printk(KERN_INFO "%s: %llu sectors (%llu MB)", ide_disk_setup()
|
H A D | pdc202xx_old.c | 258 #define DECLARE_PDC2026X_DEV(udma, sectors) \ 269 .max_sectors = sectors, \ 286 /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
|
H A D | ide-floppy.c | 317 u8 heads, sectors; ide_floppy_get_flexible_disk_page() local 340 sectors = buf[8 + 5]; ide_floppy_get_flexible_disk_page() 342 capacity = cyls * heads * sectors * sector_size; ide_floppy_get_flexible_disk_page() 348 sectors, transfer_rate / 8, sector_size, rpm); ide_floppy_get_flexible_disk_page() 353 drive->bios_sect = sectors; ide_floppy_get_flexible_disk_page()
|
H A D | ide-gd.c | 281 geo->sectors = drive->bios_sect; ide_gd_getgeo()
|
/linux-4.1.27/drivers/scsi/dpt/ |
H A D | dpti_ioctl.h | 94 unsigned sectors; /* sectors for drives on cntlr. */ member in struct:__anon8785
|
H A D | sys_info.h | 84 uCHAR sectors; /* Up to 63 */ member in struct:driveParam_S::__anon8788
|
/linux-4.1.27/fs/ocfs2/ |
H A D | ocfs1_fs_compat.h | 7 * OCFS1 volume headers on the first two sectors of an OCFS2 volume.
|
/linux-4.1.27/include/trace/events/ |
H A D | bcache.h | 410 __field(unsigned, sectors ) 418 __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]); 421 TP_printk("invalidated %u sectors at %d,%d sector=%llu", 422 __entry->sectors, MAJOR(__entry->dev),
|
H A D | block.h | 105 * can be examined to determine which device and sectors the pending 220 * be examined to determine which device and sectors the pending
|
/linux-4.1.27/fs/ntfs/ |
H A D | mst.c | 89 /* Fixup all sectors. */ post_read_mst_fixup() 156 /* Fixup all sectors. */ pre_write_mst_fixup() 192 /* Fixup all sectors. */ post_write_mst_fixup()
|
/linux-4.1.27/drivers/mtd/chips/ |
H A D | cfi_cmdset_0002.c | 111 "No WP", "8x8KiB sectors at top & bottom, no WP", cfi_tell_features() 128 printk(" Block protection: %d sectors per group\n", extp->BlkProt); cfi_tell_features() 266 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 322 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where fixup_sst38vf640x_sectorsize() 336 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); fixup_s29gl064n_sectors() 347 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); fixup_s29gl032n_sectors() 357 * S29NS512P flash uses more than 8bits to report number of sectors, fixup_s29ns512p_sectors() 361 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); fixup_s29ns512p_sectors() 2575 * have to unlock all sectors of this device instead do_ppb_xxlock() 2588 * Wait for some time as unlocking of all sectors takes quite long do_ppb_xxlock() 2634 int sectors; cfi_ppb_unlock() local 2638 * PPB unlocking always unlocks all sectors of the flash chip. cfi_ppb_unlock() 2639 * We need to re-lock all previously locked sectors. So lets cfi_ppb_unlock() 2640 * first check the locking status of all sectors and save cfi_ppb_unlock() 2648 * This code to walk all sectors is a slightly modified version cfi_ppb_unlock() 2654 sectors = 0; cfi_ppb_unlock() 2662 * Only test sectors that shall not be unlocked. The other cfi_ppb_unlock() 2663 * sectors shall be unlocked, so lets keep their locking cfi_ppb_unlock() 2667 sect[sectors].chip = &cfi->chips[chipnum]; cfi_ppb_unlock() 2668 sect[sectors].offset = offset; cfi_ppb_unlock() 2669 sect[sectors].locked = do_ppb_xxlock( cfi_ppb_unlock() 2689 sectors++; cfi_ppb_unlock() 2690 if (sectors >= MAX_SECTORS) { cfi_ppb_unlock() 2691 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", cfi_ppb_unlock() 2707 * PPB unlocking always unlocks all sectors of the flash chip. cfi_ppb_unlock() 2708 * We need to re-lock all previously locked sectors. cfi_ppb_unlock() 2710 for (i = 0; i < sectors; i++) { cfi_ppb_unlock()
|
/linux-4.1.27/drivers/block/aoe/ |
H A D | aoeblk.c | 32 "When nonzero, set the maximum number of sectors per I/O request"); 308 geo->sectors = d->geo.sectors; aoeblk_getgeo()
|
H A D | aoecmd.c | 1005 /* word 100: number lba48 sectors */ ataid_complete() 1012 d->geo.sectors = 63; ataid_complete() 1016 /* number lba28 sectors */ ataid_complete() 1022 d->geo.sectors = get_unaligned_le16(&id[56 << 1]); ataid_complete() 1032 "aoe: %pm e%ld.%d v%04x has %llu sectors\n", ataid_complete()
|
H A D | aoe.h | 84 DEFAULTBCNT = 2 * 512, /* 2 sectors */
|
/linux-4.1.27/drivers/ata/ |
H A D | libata-core.c | 100 u16 heads, u16 sectors); 725 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; ata_tf_read_block() 829 track = (u32)block / dev->sectors; ata_build_rw_tf() 832 sect = (u32)block % dev->sectors + 1; ata_build_rw_tf() 844 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ ata_build_rw_tf() 1186 u64 sectors = 0; ata_tf_to_lba48() local 1188 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; ata_tf_to_lba48() 1189 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; ata_tf_to_lba48() 1190 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; ata_tf_to_lba48() 1191 sectors |= (tf->lbah & 0xff) << 16; ata_tf_to_lba48() 1192 sectors |= (tf->lbam & 0xff) << 8; ata_tf_to_lba48() 1193 sectors |= (tf->lbal & 0xff); ata_tf_to_lba48() 1195 return sectors; ata_tf_to_lba48() 1200 u64 sectors = 0; ata_tf_to_lba() local 1202 sectors |= (tf->device & 0x0f) << 24; ata_tf_to_lba() 1203 sectors |= (tf->lbah & 0xff) << 16; ata_tf_to_lba() 1204 sectors |= (tf->lbam & 0xff) << 8; ata_tf_to_lba() 1205 sectors |= (tf->lbal & 0xff); ata_tf_to_lba() 1207 return sectors; ata_tf_to_lba() 1262 * ata_set_max_sectors - Set max sectors 1264 * @new_sectors: new max sectors value to set for the device 1266 * Set max sectors of @dev to @new_sectors. 1335 u64 sectors = ata_id_n_sectors(dev->id); ata_hpa_resize() local 1366 if (native_sectors <= sectors || !unlock_hpa) { ata_hpa_resize() 1367 if (!print_info || native_sectors == sectors) ata_hpa_resize() 1370 if (native_sectors > sectors) ata_hpa_resize() 1373 (unsigned long long)sectors, ata_hpa_resize() 1375 else if (native_sectors < sectors) ata_hpa_resize() 1377 "native sectors (%llu) is smaller than sectors (%llu)\n", ata_hpa_resize() 1379 (unsigned long long)sectors); ata_hpa_resize() 1389 (unsigned long long)sectors, ata_hpa_resize() 1408 (unsigned long long)sectors, ata_hpa_resize() 2243 dev->sectors = 0; ata_dev_configure() 2317 "%llu sectors, multi %u: %s %s\n", ata_dev_configure() 2327 dev->sectors = id[6]; ata_dev_configure() 2333 dev->sectors = id[56]; ata_dev_configure() 2342 "%llu sectors, multi %u, CHS %u/%u/%u\n", ata_dev_configure() 2345 dev->heads, dev->sectors); ata_dev_configure() 2445 200 sectors */ ata_dev_configure() 4081 /* restore original n_[native_]sectors and fail */ ata_dev_revalidate() 4550 * @sectors: Number of sectors (taskfile parameter) 4559 u16 heads, u16 sectors) ata_dev_init_params() 4564 /* Number of sectors per track 1-255. Number of heads 1-16 */ ata_dev_init_params() 4565 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) ata_dev_init_params() 4575 tf.nsect = sectors; ata_dev_init_params() 4558 ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors) ata_dev_init_params() argument
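
ata_tf_to_lba48() in the hits above assembles a 48-bit sector number from six 8-bit taskfile registers, the hob_* bytes supplying bits 47..24. A userspace sketch of that assembly; toy_taskfile is a cut-down stand-in for struct ata_taskfile:

#include <stdint.h>
#include <stdio.h>

struct toy_taskfile {
        uint8_t hob_lbah, hob_lbam, hob_lbal;   /* bits 47..24 */
        uint8_t lbah, lbam, lbal;               /* bits 23..0  */
};

static uint64_t toy_tf_to_lba48(const struct toy_taskfile *tf)
{
        uint64_t sectors = 0;

        sectors |= (uint64_t)tf->hob_lbah << 40;
        sectors |= (uint64_t)tf->hob_lbam << 32;
        sectors |= (uint64_t)tf->hob_lbal << 24;
        sectors |= (uint64_t)tf->lbah << 16;
        sectors |= (uint64_t)tf->lbam << 8;
        sectors |= tf->lbal;
        return sectors;
}

int main(void)
{
        struct toy_taskfile tf = {
                .hob_lbah = 0x00, .hob_lbam = 0x01, .hob_lbal = 0x23,
                .lbah = 0x45, .lbam = 0x67, .lbal = 0x89,
        };

        printf("LBA48 = 0x%llx\n", (unsigned long long)toy_tf_to_lba48(&tf));
        return 0;
}
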
|
H A D | sata_sil.c | 264 MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); 607 * For certain Seagate devices, we must limit the maximum sectors 641 /* limit requests to 15 sectors */ sil_dev_config()
|
H A D | libata.h | 167 u8 page, void *buf, unsigned int sectors); ata_acpi_bind_dev()
|
H A D | pata_ns87415.c | 329 /* Select 512 byte sectors */ ns87415_fixup()
|
/linux-4.1.27/include/xen/interface/io/ |
H A D | blkif.h | 79 * sectors to be discarded. The specified sectors should be discarded if the 99 * 'discard-secure' - All copies of the discarded sectors (potentially created
|
/linux-4.1.27/arch/parisc/lib/ |
H A D | io.c | 162 * IDE driver to read disk sectors. Performance is important, but 332 * driver to write disk sectors. Performance is important, but the 404 * driver to write disk sectors. Works with any alignment in SRC.
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
H A D | aiclib.h | 137 aic_sector_div(sector_t capacity, int heads, int sectors) aic_sector_div() argument 140 sector_div(capacity, (heads * sectors)); aic_sector_div()
|
H A D | aic7xxx_osm.c | 317 * possible for transactions on far away sectors to never be serviced. 700 int sectors; ahc_linux_biosparam() local 719 sectors = 32; ahc_linux_biosparam() 720 cylinders = aic_sector_div(capacity, heads, sectors); ahc_linux_biosparam() 730 sectors = 63; ahc_linux_biosparam() 731 cylinders = aic_sector_div(capacity, heads, sectors); ahc_linux_biosparam() 734 geom[1] = sectors; ahc_linux_biosparam()
|
H A D | aic79xx_osm.c | 292 * possible for transactions on far away sectors to never be serviced. 728 int sectors; ahd_linux_biosparam() local 745 sectors = 32; ahd_linux_biosparam() 746 cylinders = aic_sector_div(capacity, heads, sectors); ahd_linux_biosparam() 754 sectors = 63; ahd_linux_biosparam() 755 cylinders = aic_sector_div(capacity, heads, sectors); ahd_linux_biosparam() 758 geom[1] = sectors; ahd_linux_biosparam()
|
/linux-4.1.27/drivers/spi/ |
H A D | spi-butterfly.c | 164 * so sectors 0 and 1 can't be partitions by themselves. 176 * sectors 3-5 = 512 pages * 264 bytes/page
|
/linux-4.1.27/fs/ufs/ |
H A D | ufs_fs.h | 393 __fs32 fs_npsect; /* # sectors/track including spares */ 412 __fs32 fs_nsect; /* sectors per track */ 413 __fs32 fs_spc; /* sectors per cylinder */ 473 __fs32 fs_npsect; /* # sectors/track including spares */ 757 __u32 s_npsect; /* # sectors/track including spares */ 764 __u32 s_nsect; /* sectors per track */ 765 __u32 s_spc; /* sectors per cylinder */ 937 __fs32 fs_npsect; /* # sectors/track including spares */
|
/linux-4.1.27/arch/cris/boot/tools/ |
H A D | build.c | 51 /* max nr of sectors of setup: don't change unless you also change 278 die("Write of setup sectors failed"); main()
|
/linux-4.1.27/fs/adfs/ |
H A D | map.c | 15 * The ADFS map is basically a set of sectors. Each sector is called a 20 * sectors. A fragment id is always idlen bits long.
|
/linux-4.1.27/drivers/block/drbd/ |
H A D | drbd_int.h | 589 u64 la_size_sect; /* last agreed size, unit sectors */ 914 /* size of out-of-sync range in sectors. */ 961 int rs_last_events; /* counter of read or write "events" (unit sectors) 965 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 966 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1149 * Offsets in (512 byte) sectors. 1155 * ==> bitmap sectors = md_size_sect - bm_offset 1165 * ==> bitmap sectors = Y = al_offset - bm_offset 1167 * [padding*] are zero or up to 7 unused 512 Byte sectors to the 1179 #define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */ 1253 /* thus many _storage_ sectors are described by one bit */ 1268 /* how much _storage_ sectors we have per bitmap extent */ 1296 * log, leaving this many sectors for the bitmap. 1306 /* 16 TB in units of sectors */ 1341 extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); 1506 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n", ov_out_of_sync_print() 1588 /* sets the number of 512 byte sectors of our virtual device */ drbd_set_my_capacity() 1841 /* Returns the number of 512 byte sectors of the device */ drbd_get_capacity()
|
H A D | drbd_proc.c | 214 * we convert to sectors in the display below. */ drbd_syncer_progress() 225 /* Total sectors may be slightly off for oddly drbd_syncer_progress()
|
H A D | drbd_worker.c | 502 unsigned int want; /* The number of sectors we want in-flight */ drbd_rs_controller() 503 int req_sect; /* Number of sectors to request in this turn */ drbd_rs_controller() 504 int correction; /* Number of sectors more we need in-flight */ drbd_rs_controller() 553 unsigned int sect_in; /* Number of sectors that came in since the last turn */ drbd_rs_number_requests() 579 * but "rs_in_flight" is in "sectors" (512 Byte). */ drbd_rs_number_requests() 702 /* adjust very last sectors, in case we are oddly sized */ make_resync_request() 876 /* adjust for verify start and stop sectors, respective reached position */ drbd_resync_finished() 1602 (int)part_stat_read(&disk->part0, sectors[0]) + drbd_rs_controller_reset() 1603 (int)part_stat_read(&disk->part0, sectors[1]); drbd_rs_controller_reset()
|
/linux-4.1.27/drivers/staging/i2o/ |
H A D | i2o_block.c | 544 unsigned long heads, sectors, cylinders; i2o_block_biosparam() local 546 sectors = 63L; /* Maximize sectors per track */ i2o_block_biosparam() 558 cylinders = (unsigned long)capacity / (heads * sectors); i2o_block_biosparam() 561 *secs = (unsigned char)sectors; i2o_block_biosparam() 637 &geo->cylinders, &geo->heads, &geo->sectors); i2o_block_getgeo() 1084 osm_debug("max sectors = %d\n", queue->max_sectors); i2o_block_probe()
|
H A D | i2o_scsi.c | 737 * @capacity: size in sectors 752 ip[1] = 32; /* sectors */ i2o_scsi_bios_param() 755 ip[1] = 63; /* sectors */ i2o_scsi_bios_param()
|
H A D | pci.c | 397 "%s: limit sectors per request to %d\n", c->name, i2o_pci_probe()
|
H A D | i2o.h | 50 /* Prefetch data when reading. We continually attempt to load the next 32 sectors 54 /* Prefetch data when reading. We sometimes attempt to load the next 32 sectors 532 unsigned int limit_sectors:1; /* limit number of sectors / request */
|
/linux-4.1.27/arch/alpha/kernel/ |
H A D | io.c | 252 * IDE driver to read disk sectors. Performance is important, but 346 * driver to write disk sectors. Performance is important, but the 386 * driver to write disk sectors. Works with any alignment in SRC.
|
/linux-4.1.27/fs/nfs/blocklayout/ |
H A D | blocklayout.h | 127 /* sector_t fields are all in 512-byte sectors */
|
/linux-4.1.27/arch/mn10300/boot/tools/ |
H A D | build.c | 39 /* Minimal number of setup sectors (see also bootsect.S) */
|
/linux-4.1.27/arch/m68k/emu/ |
H A D | nfblock.c | 88 geo->sectors = 16; nfhd_getgeo()
|
/linux-4.1.27/arch/arm/mach-davinci/ |
H A D | board-dm355-leopard.c | 36 * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
|
H A D | board-dm644x-evm.c | 50 /* bootloader (UBL, U-Boot, etc) in first 5 sectors */ 57 /* bootloader params in the next 1 sectors */
|
H A D | board-dm355-evm.c | 40 * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
|
/linux-4.1.27/drivers/s390/block/ |
H A D | xpram.c | 236 * multiple of 64 (32k): tell we have 16 sectors, 4 heads, xpram_getgeo() 242 geo->sectors = 16; xpram_getgeo()
|
H A D | dasd_proc.c | 230 seq_printf(m, "with %u sectors(512B each)\n", dasd_stats_proc_show()
|
H A D | dcssblk.c | 621 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors dcssblk_add_store() 623 "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); dcssblk_add_store()
|
H A D | dasd.c | 754 long sectors, dasd_profile_end_add_data() 769 data->dasd_io_sects += sectors; dasd_profile_end_add_data() 785 data->dasd_read_sects += sectors; dasd_profile_end_add_data() 803 long tottimeps, sectors; dasd_profile_end() local 814 sectors = blk_rq_sectors(req); dasd_profile_end() 817 !sectors) dasd_profile_end() 824 tottimeps = tottime / sectors; dasd_profile_end() 826 dasd_profile_counter(sectors, sectors_ind); dasd_profile_end() 831 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); dasd_profile_end() 840 sectors, sectors_ind, tottime_ind, dasd_profile_end() 853 sectors, sectors_ind, tottime_ind, dasd_profile_end() 865 sectors, sectors_ind, tottime_ind, dasd_profile_end() 750 dasd_profile_end_add_data(struct dasd_profile_info *data, int is_alias, int is_tpm, int is_read, long sectors, int sectors_ind, int tottime_ind, int tottimeps_ind, int strtime_ind, int irqtime_ind, int irqtimeps_ind, int endtime_ind) dasd_profile_end_add_data() argument
|
H A D | dasd_eckd.c | 63 /* 64k are 128 x 512 byte sectors */ 1787 "with %d cylinders, %d heads, %d sectors%s\n", dasd_eckd_check_characteristics() 2081 geo->sectors = recs_per_track(&private->rdc_data, dasd_eckd_fill_geometry() 3321 sector_t first_trk, last_trk, sectors; dasd_raw_build_cp() local 3410 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) dasd_raw_build_cp() 3433 for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
|
/linux-4.1.27/drivers/block/mtip32xx/ |
H A D | mtip32xx.c | 596 dma_addr_t buffer_dma, unsigned int sectors); 1472 * @sectors page length to fetch, in sectors 1478 dma_addr_t buffer_dma, unsigned int sectors) mtip_read_log_page() 1486 fis.sect_count = sectors & 0xFF; mtip_read_log_page() 1487 fis.sect_cnt_ex = (sectors >> 8) & 0xFF; mtip_read_log_page() 1492 memset(buffer, 0, sectors * ATA_SECT_SIZE); mtip_read_log_page() 1498 sectors * ATA_SECT_SIZE, mtip_read_log_page() 1596 * Trim unused sectors 1600 * @len # of 512b sectors to trim 1675 * @sectors Pointer to the variable that will receive the sector count. 1681 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) mtip_hw_get_capacity() argument 1690 *sectors = total; mtip_hw_get_capacity() 1704 sector_t sectors; mtip_dump_identify() local 1726 if (mtip_hw_get_capacity(port->dd, §ors)) mtip_dump_identify() 1728 "Capacity: %llu sectors (%llu MB)\n", mtip_dump_identify() 1729 (u64)sectors, mtip_dump_identify() 1730 ((u64)sectors) * ATA_SECT_SIZE >> 20); mtip_dump_identify() 2366 * @nsect Number of sectors to read. 3609 * device as having 224 heads and 56 sectors per cylinder. These values are 3638 geo->sectors = 56; mtip_block_getgeo() 3639 sector_div(capacity, (geo->heads * geo->sectors)); mtip_block_getgeo() 3961 /* Set the capacity of the device in 512 byte sectors. */ mtip_block_initialize() 1477 mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, dma_addr_t buffer_dma, unsigned int sectors) mtip_read_log_page() argument
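
mtip_read_log_page() splits a 16-bit sector count across the FIS's sect_count and sect_cnt_ex bytes. A small sketch of that packing and the reverse reconstruction; toy_fis is a two-field stand-in, not the driver's full host-to-device FIS:

#include <stdint.h>
#include <stdio.h>

struct toy_fis { uint8_t sect_count, sect_cnt_ex; };

int main(void)
{
        unsigned sectors = 0x1234;                    /* example transfer length */
        struct toy_fis fis = {
                .sect_count  = sectors & 0xFF,        /* low 8 bits  */
                .sect_cnt_ex = (sectors >> 8) & 0xFF, /* high 8 bits */
        };

        printf("sect_count=0x%02x sect_cnt_ex=0x%02x -> %u sectors\n",
               fis.sect_count, fis.sect_cnt_ex,
               fis.sect_count | (fis.sect_cnt_ex << 8));
        return 0;
}
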
|
/linux-4.1.27/drivers/cdrom/ |
H A D | gdrom.c | 580 * 8 -> sectors >> 16 581 * 9 -> sectors >> 8 582 * 10 -> sectors
|
/linux-4.1.27/fs/fat/ |
H A D | fatent.c | 577 * Issue discard for the sectors we no longer fat_free_clusters() 633 /* 128kb is the whole sectors for FAT12 and FAT16 */
|
H A D | inode.c | 1331 if (get_unaligned_le16(&b->sectors)) fat_bpb_is_zero() 1356 bpb->fat_sectors = get_unaligned_le16(&b->sectors); fat_read_bpb() 1373 "bogus number of reserved sectors"); fat_read_bpb() 1405 fat_msg(sb, KERN_ERR, "bogus sectors per cluster %u", fat_read_bpb() 1458 "This looks like a DOS 1.x volume, but isn't a recognized floppy size (%llu sectors)", fat_read_static_bpb()
|
/linux-4.1.27/drivers/mtd/devices/ |
H A D | bcm47xxsflash.c | 76 /* Newer flashes have "sub-sectors" which can be erased bcm47xxsflash_erase()
|
H A D | st_spi_fsm.c | 402 * - Supports 'DYB' sector protection. Depending on variant, sectors 417 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 428 /* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */ 1428 * unlock sectors if necessary (some variants power-on with sectors stfsm_s25fl_config() 1438 /* Handle bottom/top 4KiB parameter sectors */ stfsm_s25fl_config() 1794 * one or more erase sectors. Return an error is there is a problem erasing.
|
/linux-4.1.27/drivers/mtd/spi-nor/ |
H A D | spi-nor.c | 297 * one or more erase sectors. Return an error is there is a problem erasing. 595 * for the chips listed here (without boot sectors). 618 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 666 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
|
/linux-4.1.27/drivers/memstick/core/ |
H A D | mspro_block.c | 240 geo->sectors = msb->sectors_per_track; mspro_block_bd_getgeo() 431 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sectors: %x\n", mspro_block_attr_show_mbr() 434 "sectors per partition: %x\n", mspro_block_attr_show_mbr() 492 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per track: %x\n", mspro_block_attr_show_devinfo()
|
/linux-4.1.27/drivers/scsi/megaraid/ |
H A D | megaraid_mbox.h | 99 #define MBOX_MAX_SECTORS 128 // maximum sectors per IO
|
H A D | mega_common.h | 135 * @max_sectors : max sectors per request
|
/linux-4.1.27/drivers/block/xen-blkback/ |
H A D | xenbus.c | 812 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", connect() 815 xenbus_dev_fatal(dev, err, "writing %s/sectors", connect()
|
/linux-4.1.27/fs/f2fs/ |
H A D | segment.h | 101 #define SECTOR_TO_BLOCK(sectors) \ 102 (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
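
SECTOR_TO_BLOCK() above is a plain right shift by log2(sectors per block). A tiny worked example, assuming 512-byte sectors and 4 KiB blocks so the shift is 3; the block size is an assumption for the example, not stated in the excerpt:

#include <stdio.h>

#define LOG_SECTORS_PER_BLOCK 3              /* 4096 / 512 = 8 = 1 << 3 */
#define SECTOR_TO_BLOCK(sectors) ((sectors) >> LOG_SECTORS_PER_BLOCK)

int main(void)
{
        unsigned long sectors = 1048576;     /* 512 MiB worth of sectors */

        printf("%lu sectors -> %lu blocks\n", sectors, SECTOR_TO_BLOCK(sectors));
        return 0;
}
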
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | vio.h | 188 u16 num_sec; /* Num sectors */
|
/linux-4.1.27/arch/x86/boot/tools/ |
H A D | build.c | 46 /* Minimal number of setup sectors */
|
/linux-4.1.27/arch/s390/include/uapi/asm/ |
H A D | dasd.h | 156 unsigned int dasd_io_sects; /* number of sectors processed at all */
|
/linux-4.1.27/drivers/mtd/onenand/ |
H A D | onenand_base.c | 244 * @param sectors the sector address 245 * @param count the number of sectors 250 static int onenand_buffer_address(int dataram1, int sectors, int count) onenand_buffer_address() argument 255 bsa = sectors & ONENAND_BSA_MASK; onenand_buffer_address() 453 int sectors = 0, count = 0; onenand_command() local 475 value = onenand_page_address(page, sectors); onenand_command() 479 value = onenand_buffer_address(dataram, sectors, count); onenand_command() 2861 int sectors = 4, count = 4; onenand_otp_command() local 2873 value = onenand_page_address(page, sectors); onenand_otp_command() 2878 value = onenand_buffer_address(dataram, sectors, count); onenand_otp_command()
|