Lines matching refs:geo in drivers/md/raid10.c

552 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)  in __raid10_find_phys()  argument
562 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in __raid10_find_phys()
563 last_far_set_start *= geo->far_set_size; in __raid10_find_phys()
565 last_far_set_size = geo->far_set_size; in __raid10_find_phys()
566 last_far_set_size += (geo->raid_disks % geo->far_set_size); in __raid10_find_phys()
569 chunk = r10bio->sector >> geo->chunk_shift; in __raid10_find_phys()
570 sector = r10bio->sector & geo->chunk_mask; in __raid10_find_phys()
572 chunk *= geo->near_copies; in __raid10_find_phys()
574 dev = sector_div(stripe, geo->raid_disks); in __raid10_find_phys()
575 if (geo->far_offset) in __raid10_find_phys()
576 stripe *= geo->far_copies; in __raid10_find_phys()
578 sector += stripe << geo->chunk_shift; in __raid10_find_phys()
581 for (n = 0; n < geo->near_copies; n++) { in __raid10_find_phys()
589 for (f = 1; f < geo->far_copies; f++) { in __raid10_find_phys()
590 set = d / geo->far_set_size; in __raid10_find_phys()
591 d += geo->near_copies; in __raid10_find_phys()
593 if ((geo->raid_disks % geo->far_set_size) && in __raid10_find_phys()
599 d %= geo->far_set_size; in __raid10_find_phys()
600 d += geo->far_set_size * set; in __raid10_find_phys()
602 s += geo->stride; in __raid10_find_phys()
608 if (dev >= geo->raid_disks) { in __raid10_find_phys()
610 sector += (geo->chunk_mask + 1); in __raid10_find_phys()
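
The lines above trace __raid10_find_phys(): the logical sector is split into a chunk number and an in-chunk offset (569-570), the chunk number is multiplied by near_copies and spread across raid_disks (572-578), and far copies (589-602) are then placed per far set, geo->stride apart. A minimal userspace sketch of just the near-copy arithmetic, with made-up names (toy_geo, toy_find_phys) and no far copies or far sets, might look like this:

#include <stdio.h>
#include <stdint.h>

struct toy_geo {
        int raid_disks;
        int near_copies;
        uint64_t chunk_mask;    /* chunk size in sectors, minus 1 */
        int chunk_shift;        /* log2 of chunk size in sectors */
};

/* Map a logical array sector to (device, device sector) for each near copy.
 * Mirrors lines 569-581 and the wrap at 608-610; far copies are omitted. */
static void toy_find_phys(const struct toy_geo *geo, uint64_t lsector)
{
        uint64_t chunk  = lsector >> geo->chunk_shift;
        uint64_t sector = lsector & geo->chunk_mask;
        uint64_t stripe;
        int dev, n;

        chunk *= geo->near_copies;              /* each chunk is stored near_copies times */
        stripe = chunk;
        dev = stripe % geo->raid_disks;         /* the kernel uses sector_div() here */
        stripe /= geo->raid_disks;
        sector += stripe << geo->chunk_shift;

        for (n = 0; n < geo->near_copies; n++) {
                printf("copy %d -> dev %d, dev sector %llu\n",
                       n, dev, (unsigned long long)sector);
                if (++dev >= geo->raid_disks) { /* wrap to device 0, move down one chunk */
                        dev = 0;
                        sector += geo->chunk_mask + 1;
                }
        }
}

int main(void)
{
        struct toy_geo geo = { .raid_disks = 4, .near_copies = 2,
                               .chunk_mask = 1023, .chunk_shift = 10 };  /* 512 KiB chunks */
        toy_find_phys(&geo, 5000);
        return 0;
}
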
617 struct geom *geo = &conf->geo; in raid10_find_phys() local
623 geo = &conf->prev; in raid10_find_phys()
627 __raid10_find_phys(geo, r10bio); in raid10_find_phys()
636 struct geom *geo = &conf->geo; in raid10_find_virt() local
637 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size; in raid10_find_virt()
638 int far_set_size = geo->far_set_size; in raid10_find_virt()
641 if (geo->raid_disks % geo->far_set_size) { in raid10_find_virt()
642 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in raid10_find_virt()
643 last_far_set_start *= geo->far_set_size; in raid10_find_virt()
646 far_set_size = geo->far_set_size; in raid10_find_virt()
647 far_set_size += (geo->raid_disks % geo->far_set_size); in raid10_find_virt()
652 offset = sector & geo->chunk_mask; in raid10_find_virt()
653 if (geo->far_offset) { in raid10_find_virt()
655 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
656 fc = sector_div(chunk, geo->far_copies); in raid10_find_virt()
657 dev -= fc * geo->near_copies; in raid10_find_virt()
661 while (sector >= geo->stride) { in raid10_find_virt()
662 sector -= geo->stride; in raid10_find_virt()
663 if (dev < (geo->near_copies + far_set_start)) in raid10_find_virt()
664 dev += far_set_size - geo->near_copies; in raid10_find_virt()
666 dev -= geo->near_copies; in raid10_find_virt()
668 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
670 vchunk = chunk * geo->raid_disks + dev; in raid10_find_virt()
671 sector_div(vchunk, geo->near_copies); in raid10_find_virt()
672 return (vchunk << geo->chunk_shift) + offset; in raid10_find_virt()
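
raid10_find_virt() is the inverse mapping: given a device index and a physical sector on that device, it reconstructs the virtual array sector, undoing the far-copy adjustment (653-666) and the near-copy striping. For the same simplified near-copies-only geometry as the previous sketch (reusing struct toy_geo), the core of the inverse is the arithmetic at 668-672; toy_find_virt is an illustrative name, not the kernel's:

/* Inverse of toy_find_phys() above for a near-copies-only geometry. */
static uint64_t toy_find_virt(const struct toy_geo *geo, uint64_t psector, int dev)
{
        uint64_t offset = psector & geo->chunk_mask;            /* 652 */
        uint64_t chunk  = psector >> geo->chunk_shift;          /* 668 */
        uint64_t vchunk = chunk * geo->raid_disks + dev;        /* 670 */

        vchunk /= geo->near_copies;     /* 671: sector_div(vchunk, geo->near_copies) */
        return (vchunk << geo->chunk_shift) + offset;           /* 672 */
}

With the sample geometry above, both copies produced by toy_find_phys(&geo, 5000), (dev 0, sector 2952) and (dev 1, sector 2952), map back to array sector 5000.
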
694 struct geom *geo = &conf->geo; in raid10_mergeable_bvec() local
696 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1; in raid10_mergeable_bvec()
700 geo = &conf->prev; in raid10_mergeable_bvec()
702 if (geo->near_copies < geo->raid_disks) { in raid10_mergeable_bvec()
793 struct geom *geo = &conf->geo; in read_balance() local
874 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) in read_balance()
878 if (geo->far_copies > 1) in read_balance()
923 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) in raid10_congested()
1539 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in make_request()
1559 && (conf->geo.near_copies < conf->geo.raid_disks in make_request()
1583 if (conf->geo.near_copies < conf->geo.raid_disks) in status()
1585 if (conf->geo.near_copies > 1) in status()
1586 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in status()
1587 if (conf->geo.far_copies > 1) { in status()
1588 if (conf->geo.far_offset) in status()
1589 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in status()
1591 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in status()
1593 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in status()
1594 conf->geo.raid_disks - mddev->degraded); in status()
1595 for (i = 0; i < conf->geo.raid_disks; i++) in status()
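
As an illustration of the status() lines above: for a healthy 4-device array with 2 near copies and a single far copy, the geo-derived portion of the status line would read roughly "2 near-copies [4/4]" followed by one character per device from the loop at 1595 (in /proc/mdstat this typically appears as [UUUU]); the [4/4] pair is total devices versus non-degraded devices (1593-1594), and the offset-copies/far-copies variants at 1588-1591 only appear when far_copies > 1.
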
1616 disks = conf->geo.raid_disks; in _enough()
1617 ncopies = conf->geo.near_copies; in _enough()
1689 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1702 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1703 conf->geo.raid_disks); in print_conf()
1705 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1737 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1779 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1884 number < conf->geo.raid_disks && in raid10_remove_disk()
2849 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2903 sector_t chunk_mask = conf->geo.chunk_mask; in sync_request()
2949 else for (i = 0; i < conf->geo.raid_disks; i++) { in sync_request()
2963 for (i = 0; i < conf->geo.raid_disks; i++) in sync_request()
2980 if (chunks_skipped >= conf->geo.raid_disks) { in sync_request()
2994 if (conf->geo.near_copies < conf->geo.raid_disks && in sync_request()
3019 for (i = 0 ; i < conf->geo.raid_disks; i++) { in sync_request()
3080 for (j = 0; j < conf->geo.raid_disks; j++) in sync_request()
3423 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3428 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3429 sector_div(size, conf->geo.far_copies); in raid10_size()
3431 sector_div(size, conf->geo.near_copies); in raid10_size()
3433 return size << conf->geo.chunk_shift; in raid10_size()
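
A worked example of raid10_size(): with four devices of 1,048,576 sectors each, 512 KiB chunks (chunk_shift = 10), near_copies = 2 and far_copies = 1, the shift at 3428 turns the per-device sectors into 1024 chunks, the far-copy division at 3429 leaves that unchanged, the multiplication by the four devices happens on a line not listed here (it uses the local raid_disks rather than geo), and the near-copy division at 3431 halves the result, giving 2048 chunks or 2,097,152 sectors (1 GiB) of usable capacity.
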
3443 size = size >> conf->geo.chunk_shift; in calc_sectors()
3444 sector_div(size, conf->geo.far_copies); in calc_sectors()
3445 size = size * conf->geo.raid_disks; in calc_sectors()
3446 sector_div(size, conf->geo.near_copies); in calc_sectors()
3454 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3456 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3458 if (conf->geo.far_offset) in calc_sectors()
3459 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3461 sector_div(size, conf->geo.far_copies); in calc_sectors()
3462 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
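
calc_sectors() also derives geo.stride, the on-device distance between successive far-copy regions: with far_offset set, the copies of a chunk sit in adjacent device chunks, so the stride is a single chunk (3459); otherwise the used space on each device is split into far_copies equal regions (3461-3462), so with 2 far copies and 1,048,576 used sectors per device the stride would be 524,288 sectors, placing the far copies in the second half of every device.
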
3467 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3498 geo->raid_disks = disks; in setup_geo()
3499 geo->near_copies = nc; in setup_geo()
3500 geo->far_copies = fc; in setup_geo()
3501 geo->far_offset = fo; in setup_geo()
3502 geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks; in setup_geo()
3503 geo->chunk_mask = chunk - 1; in setup_geo()
3504 geo->chunk_shift = ffz(~chunk); in setup_geo()
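
setup_geo() translates the md layout word and chunk size into a struct geom. The field assignments are listed above; the decoding of the layout word itself (near copies in bits 0-7, far copies in bits 8-15, bit 16 selecting the offset layout, bit 17 enabling far sets) happens on lines of setup_geo() that do not reference geo and so are not listed. A hedged userspace sketch of the whole decode, with illustrative names:

#include <stdio.h>
#include <stdint.h>

struct toy_geom {
        int raid_disks, near_copies, far_copies, far_offset, far_set_size;
        uint64_t chunk_mask;
        int chunk_shift;
};

/* Decode an md RAID10 layout word plus a power-of-two chunk size (in sectors).
 * Returns the total number of copies (near * far), matching how setup_conf()
 * uses the return value at 3515. */
static int toy_setup_geo(struct toy_geom *geo, int layout, int disks, int chunk)
{
        int nc = layout & 255;          /* near copies */
        int fc = (layout >> 8) & 255;   /* far copies */
        int fo = layout & (1 << 16);    /* far copies use the offset layout */

        geo->raid_disks   = disks;
        geo->near_copies  = nc;
        geo->far_copies   = fc;
        geo->far_offset   = fo != 0;
        geo->far_set_size = (layout & (1 << 17)) ? disks / fc : disks;
        geo->chunk_mask   = chunk - 1;
        geo->chunk_shift  = __builtin_ctz(chunk);   /* the kernel uses ffz(~chunk) */
        return nc * fc;
}

int main(void)
{
        struct toy_geom geo;
        int copies = toy_setup_geo(&geo, 0x102, 4, 1024);  /* near=2, far=1, 512 KiB chunks */

        printf("copies=%d near=%d far=%d offset=%d set_size=%d shift=%d\n",
               copies, geo.near_copies, geo.far_copies,
               geo.far_offset, geo.far_set_size, geo.chunk_shift);
        return 0;
}
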
3512 struct geom geo; in setup_conf() local
3515 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3546 conf->geo = geo; in setup_conf()
3555 conf->prev = conf->geo; in setup_conf()
3627 if (conf->geo.raid_disks % conf->geo.near_copies) in run()
3628 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3631 (conf->geo.raid_disks / conf->geo.near_copies)); in run()
3641 if (disk_idx >= conf->geo.raid_disks && in run()
3693 if (conf->geo.far_copies != 1 && in run()
3694 conf->geo.far_offset == 0) in run()
3703 i < conf->geo.raid_disks in run()
3733 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3734 conf->geo.raid_disks); in run()
3744 int stripe = conf->geo.raid_disks * in run()
3751 stripe /= conf->geo.near_copies; in run()
3764 after_length = ((1 << conf->geo.chunk_shift) * in run()
3765 conf->geo.far_copies); in run()
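
The run() hits above also size the block-layer hints from the geometry: the optimal I/O size (3627-3631) is chunk_size * raid_disks when raid_disks is not a multiple of near_copies, otherwise chunk_size * (raid_disks / near_copies), and the read-ahead target (3744-3751) is derived from the same raid_disks / near_copies stripe width. For 4 devices, 2 near copies and 512 KiB chunks that gives, for example, a 1 MiB optimal I/O size.
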
3843 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
3946 struct geom geo; in raid10_check_reshape() local
3948 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
3951 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3954 if (geo.far_copies > 1 && !geo.far_offset) in raid10_check_reshape()
3958 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
4013 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4017 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4027 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4066 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4067 conf->geo.far_copies); in raid10_start_reshape()
4102 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4122 conf->geo.raid_disks), in raid10_start_reshape()
4154 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4178 conf->geo = conf->prev; in raid10_start_reshape()
4179 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4196 static sector_t last_dev_address(sector_t s, struct geom *geo) in last_dev_address() argument
4198 s = (s | geo->chunk_mask) + 1; in last_dev_address()
4199 s >>= geo->chunk_shift; in last_dev_address()
4200 s *= geo->near_copies; in last_dev_address()
4201 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); in last_dev_address()
4202 s *= geo->far_copies; in last_dev_address()
4203 s <<= geo->chunk_shift; in last_dev_address()
4211 static sector_t first_dev_address(sector_t s, struct geom *geo) in first_dev_address() argument
4213 s >>= geo->chunk_shift; in first_dev_address()
4214 s *= geo->near_copies; in first_dev_address()
4215 sector_div(s, geo->raid_disks); in first_dev_address()
4216 s *= geo->far_copies; in first_dev_address()
4217 s <<= geo->chunk_shift; in first_dev_address()
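
last_dev_address() and first_dev_address() bound, for the chunk containing a given array address, the range of device addresses that reshape_request() has to worry about: round the array address up or down to a chunk, convert array chunks to per-device chunks, and scale by far_copies. A standalone hedged sketch of both, plus a sample run, assuming an offset-copies geometry since raid10_check_reshape() at 3948 refuses geometries with multiple far copies unless far_offset is set; names are illustrative:

#include <stdio.h>
#include <stdint.h>

struct toy_rgeo {
        int raid_disks, near_copies, far_copies, chunk_shift;
        uint64_t chunk_mask;
};

/* Chunk-aligned device address just past any data from the chunk containing
 * array address 's' (cf. 4198-4203). */
static uint64_t toy_last_dev_address(uint64_t s, const struct toy_rgeo *g)
{
        s = (s | g->chunk_mask) + 1;                    /* round up to a chunk boundary */
        s >>= g->chunk_shift;
        s *= g->near_copies;
        s = (s + g->raid_disks - 1) / g->raid_disks;    /* DIV_ROUND_UP_SECTOR_T */
        s *= g->far_copies;
        return s << g->chunk_shift;
}

/* Chunk-aligned device address of the first data from the chunk containing
 * array address 's' (cf. 4213-4217). */
static uint64_t toy_first_dev_address(uint64_t s, const struct toy_rgeo *g)
{
        s >>= g->chunk_shift;                           /* round down to a chunk boundary */
        s *= g->near_copies;
        s /= g->raid_disks;                             /* sector_div(), rounds down */
        s *= g->far_copies;
        return s << g->chunk_shift;
}

int main(void)
{
        struct toy_rgeo g = { .raid_disks = 4, .near_copies = 1, .far_copies = 2,
                              .chunk_shift = 10, .chunk_mask = 1023 };

        /* The chunk containing array sector 5000 occupies device sectors
         * within the half-open range printed below ([2048, 4096) here). */
        printf("array sector 5000 -> device sectors [%llu, %llu)\n",
               (unsigned long long)toy_first_dev_address(5000, &g),
               (unsigned long long)toy_last_dev_address(5000, &g));
        return 0;
}
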
4299 &conf->geo); in reshape_request()
4311 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4319 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4333 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4402 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4535 conf->prev = conf->geo; in end_reshape()
4546 int stripe = conf->geo.raid_disks * in end_reshape()
4548 stripe /= conf->geo.near_copies; in end_reshape()
4673 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4674 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4685 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()