Lines matching refs: conf (drivers/md/raid10.c)

98 static void allow_barrier(struct r10conf *conf);
99 static void lower_barrier(struct r10conf *conf);
100 static int _enough(struct r10conf *conf, int previous, int ignore);
105 static void end_reshape(struct r10conf *conf);
109 struct r10conf *conf = data; in r10bio_pool_alloc() local
110 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
139 struct r10conf *conf = data; in r10buf_pool_alloc() local
146 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
150 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
151 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
152 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
164 if (!conf->have_replacement) in r10buf_pool_alloc()
180 &conf->mddev->recovery)) { in r10buf_pool_alloc()
213 r10bio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
220 struct r10conf *conf = data; in r10buf_pool_free() local
224 for (j=0; j < conf->copies; j++) { in r10buf_pool_free()
237 r10bio_pool_free(r10bio, conf); in r10buf_pool_free()
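
The r10bio_pool_alloc()/r10buf_pool_alloc()/r10bio_pool_free() entries above are the custom constructors and destructor behind conf->r10bio_pool and conf->r10buf_pool. The following is a minimal sketch of that mempool pattern, not the driver code: the demo_* names are hypothetical and only show the shape of the callbacks and the conf-sized allocation.

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/errno.h>

struct demo_conf { int copies; mempool_t *pool; };
struct demo_obj  { int nr; void *slot[]; };

/* Sized by conf->copies, as r10bio_pool_alloc() sizes struct r10bio. */
static void *demo_pool_alloc(gfp_t gfp, void *data)
{
	struct demo_conf *conf = data;

	return kzalloc(offsetof(struct demo_obj, slot[conf->copies]), gfp);
}

static void demo_pool_free(void *element, void *data)
{
	kfree(element);
}

static int demo_pool_init(struct demo_conf *conf)
{
	/* Same call shape as the r10bio_pool/r10buf_pool creation above. */
	conf->pool = mempool_create(16, demo_pool_alloc, demo_pool_free, conf);
	return conf->pool ? 0 : -ENOMEM;
}
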
240 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
244 for (i = 0; i < conf->copies; i++) { in put_all_bios()
258 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
260 put_all_bios(conf, r10_bio); in free_r10bio()
261 mempool_free(r10_bio, conf->r10bio_pool); in free_r10bio()
266 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
268 mempool_free(r10_bio, conf->r10buf_pool); in put_buf()
270 lower_barrier(conf); in put_buf()
277 struct r10conf *conf = mddev->private; in reschedule_retry() local
279 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
280 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
281 conf->nr_queued ++; in reschedule_retry()
282 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
285 wake_up(&conf->wait_barrier); in reschedule_retry()
299 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
303 spin_lock_irqsave(&conf->device_lock, flags); in raid_end_bio_io()
306 spin_unlock_irqrestore(&conf->device_lock, flags); in raid_end_bio_io()
317 allow_barrier(conf); in raid_end_bio_io()
327 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
329 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
336 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
342 for (slot = 0; slot < conf->copies; slot++) { in find_bio_disk()
351 BUG_ON(slot == conf->copies); in find_bio_disk()
367 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
394 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
400 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
408 mdname(conf->mddev), in raid10_end_read_request()
447 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
454 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
458 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
524 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
615 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
617 struct geom *geo = &conf->geo; in raid10_find_phys()
619 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
620 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
621 conf->mddev->reshape_backwards)) { in raid10_find_phys()
623 geo = &conf->prev; in raid10_find_phys()
630 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
636 struct geom *geo = &conf->geo; in raid10_find_virt()
689 struct r10conf *conf = mddev->private; in raid10_mergeable_bvec() local
694 struct geom *geo = &conf->geo; in raid10_mergeable_bvec()
696 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1; in raid10_mergeable_bvec()
697 if (conf->reshape_progress != MaxSector && in raid10_mergeable_bvec()
698 ((sector >= conf->reshape_progress) != in raid10_mergeable_bvec()
699 conf->mddev->reshape_backwards)) in raid10_mergeable_bvec()
700 geo = &conf->prev; in raid10_mergeable_bvec()
716 struct r10dev devs[conf->copies]; in raid10_mergeable_bvec()
720 if (conf->reshape_progress != MaxSector) { in raid10_mergeable_bvec()
727 raid10_find_phys(conf, r10_bio); in raid10_mergeable_bvec()
729 for (s = 0; s < conf->copies; s++) { in raid10_mergeable_bvec()
732 conf->mirrors[disk].rdev); in raid10_mergeable_bvec()
744 rdev = rcu_dereference(conf->mirrors[disk].replacement); in raid10_mergeable_bvec()
781 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
793 struct geom *geo = &conf->geo; in read_balance()
795 raid10_find_phys(conf, r10_bio); in read_balance()
810 if (conf->mddev->recovery_cp < MaxSector in read_balance()
811 && (this_sector + sectors >= conf->next_resync)) in read_balance()
814 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
822 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
826 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
882 conf->mirrors[disk].head_position); in read_balance()
889 if (slot >= conf->copies) { in read_balance()
900 rdev_dec_pending(rdev, conf->mddev); in read_balance()
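
read_balance() above selects which mirror services a read. Stripped of the bad-block, rebuild and resync handling, the core heuristic is: among devices holding an in-sync copy, prefer the one whose recorded head position is closest to the target sector. The plain-C sketch below shows only that distilled idea; struct demo_mirror and pick_read_mirror() are hypothetical.

#include <stdint.h>

struct demo_mirror {
	int in_sync;              /* device holds a usable copy */
	uint64_t head_position;   /* last sector serviced       */
};

/* Return the index of the preferred mirror for a read at 'sector', or -1. */
static int pick_read_mirror(const struct demo_mirror *m, int copies,
			    uint64_t sector)
{
	int best = -1;
	uint64_t best_dist = UINT64_MAX;

	for (int i = 0; i < copies; i++) {
		uint64_t dist;

		if (!m[i].in_sync)
			continue;
		dist = m[i].head_position > sector ?
			m[i].head_position - sector : sector - m[i].head_position;
		if (dist < best_dist) {
			best_dist = dist;
			best = i;
		}
	}
	return best;
}
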
914 struct r10conf *conf = mddev->private; in raid10_congested() local
918 conf->pending_count >= max_queued_requests) in raid10_congested()
923 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) in raid10_congested()
926 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_congested()
937 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
942 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
944 if (conf->pending_bio_list.head) { in flush_pending_writes()
946 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
947 conf->pending_count = 0; in flush_pending_writes()
948 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
951 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
952 wake_up(&conf->wait_barrier); in flush_pending_writes()
966 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
991 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
993 BUG_ON(force && !conf->barrier); in raise_barrier()
994 spin_lock_irq(&conf->resync_lock); in raise_barrier()
997 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, in raise_barrier()
998 conf->resync_lock); in raise_barrier()
1001 conf->barrier++; in raise_barrier()
1004 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
1005 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, in raise_barrier()
1006 conf->resync_lock); in raise_barrier()
1008 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
1011 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
1014 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
1015 conf->barrier--; in lower_barrier()
1016 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
1017 wake_up(&conf->wait_barrier); in lower_barrier()
1020 static void wait_barrier(struct r10conf *conf) in wait_barrier() argument
1022 spin_lock_irq(&conf->resync_lock); in wait_barrier()
1023 if (conf->barrier) { in wait_barrier()
1024 conf->nr_waiting++; in wait_barrier()
1034 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
1035 !conf->barrier || in wait_barrier()
1036 (conf->nr_pending && in wait_barrier()
1039 conf->resync_lock); in wait_barrier()
1040 conf->nr_waiting--; in wait_barrier()
1042 conf->nr_pending++; in wait_barrier()
1043 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
1046 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
1049 spin_lock_irqsave(&conf->resync_lock, flags); in allow_barrier()
1050 conf->nr_pending--; in allow_barrier()
1051 spin_unlock_irqrestore(&conf->resync_lock, flags); in allow_barrier()
1052 wake_up(&conf->wait_barrier); in allow_barrier()
1055 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
1069 spin_lock_irq(&conf->resync_lock); in freeze_array()
1070 conf->barrier++; in freeze_array()
1071 conf->nr_waiting++; in freeze_array()
1072 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
1073 conf->nr_pending == conf->nr_queued+extra, in freeze_array()
1074 conf->resync_lock, in freeze_array()
1075 flush_pending_writes(conf)); in freeze_array()
1077 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1080 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
1083 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1084 conf->barrier--; in unfreeze_array()
1085 conf->nr_waiting--; in unfreeze_array()
1086 wake_up(&conf->wait_barrier); in unfreeze_array()
1087 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
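
Taken together, raise_barrier()/lower_barrier()/wait_barrier()/allow_barrier() and freeze_array()/unfreeze_array() above implement a counting barrier: resync raises conf->barrier and waits for conf->nr_pending normal I/O to drain, while normal I/O blocks in wait_barrier() whenever the barrier is up. The userspace sketch below reproduces just that counter scheme with pthreads; it deliberately omits the kernel's nested-request exception, the RESYNC_DEPTH limit and the flush_pending_writes() command, and is an analogy rather than the driver code.

#include <pthread.h>

struct barrier_state {
	pthread_mutex_t lock;        /* plays the role of resync_lock  */
	pthread_cond_t  wait;        /* plays the role of wait_barrier */
	int barrier;                 /* like conf->barrier             */
	int nr_pending;              /* like conf->nr_pending          */
	int nr_waiting;              /* like conf->nr_waiting          */
};

static void demo_raise_barrier(struct barrier_state *b)
{
	pthread_mutex_lock(&b->lock);
	b->barrier++;
	while (b->nr_pending)        /* wait for normal I/O to drain */
		pthread_cond_wait(&b->wait, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

static void demo_lower_barrier(struct barrier_state *b)
{
	pthread_mutex_lock(&b->lock);
	b->barrier--;
	pthread_cond_broadcast(&b->wait);
	pthread_mutex_unlock(&b->lock);
}

static void demo_wait_barrier(struct barrier_state *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->barrier) {         /* block new I/O while resync owns the region */
		b->nr_waiting++;
		pthread_cond_wait(&b->wait, &b->lock);
		b->nr_waiting--;
	}
	b->nr_pending++;
	pthread_mutex_unlock(&b->lock);
}

static void demo_allow_barrier(struct barrier_state *b)
{
	pthread_mutex_lock(&b->lock);
	b->nr_pending--;
	pthread_cond_broadcast(&b->wait);
	pthread_mutex_unlock(&b->lock);
}

Freezing the array is the same idea taken further: freeze_array() raises the barrier and additionally waits until nr_pending drops to nr_queued (plus the caller's own requests), flushing pending writes so queued retries can still make progress.
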
1111 struct r10conf *conf = mddev->private; in raid10_unplug() local
1115 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1116 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1117 conf->pending_count += plug->pending_cnt; in raid10_unplug()
1118 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1119 wake_up(&conf->wait_barrier); in raid10_unplug()
1128 wake_up(&conf->wait_barrier); in raid10_unplug()
1146 struct r10conf *conf = mddev->private; in __make_request() local
1169 wait_barrier(conf); in __make_request()
1173 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1178 allow_barrier(conf); in __make_request()
1179 wait_event(conf->wait_barrier, in __make_request()
1180 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1181 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1183 wait_barrier(conf); in __make_request()
1188 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1191 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1193 mddev->reshape_position = conf->reshape_progress; in __make_request()
1200 conf->reshape_safe = mddev->reshape_position; in __make_request()
1203 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1230 rdev = read_balance(conf, r10_bio, &max_sectors); in __make_request()
1258 spin_lock_irq(&conf->device_lock); in __make_request()
1263 spin_unlock_irq(&conf->device_lock); in __make_request()
1271 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1288 if (conf->pending_count >= max_queued_requests) { in __make_request()
1290 wait_event(conf->wait_barrier, in __make_request()
1291 conf->pending_count < max_queued_requests); in __make_request()
1306 raid10_find_phys(conf, r10_bio); in __make_request()
1312 for (i = 0; i < conf->copies; i++) { in __make_request()
1314 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in __make_request()
1316 conf->mirrors[d].replacement); in __make_request()
1404 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in __make_request()
1409 rdev = conf->mirrors[d].replacement; in __make_request()
1413 rdev = conf->mirrors[d].rdev; in __make_request()
1418 allow_barrier(conf); in __make_request()
1420 wait_barrier(conf); in __make_request()
1429 spin_lock_irq(&conf->device_lock); in __make_request()
1434 spin_unlock_irq(&conf->device_lock); in __make_request()
1442 for (i = 0; i < conf->copies; i++) { in __make_request()
1446 struct md_rdev *rdev = conf->mirrors[d].rdev; in __make_request()
1470 spin_lock_irqsave(&conf->device_lock, flags); in __make_request()
1475 bio_list_add(&conf->pending_bio_list, mbio); in __make_request()
1476 conf->pending_count++; in __make_request()
1478 spin_unlock_irqrestore(&conf->device_lock, flags); in __make_request()
1484 struct md_rdev *rdev = conf->mirrors[d].replacement; in __make_request()
1488 rdev = conf->mirrors[d].rdev; in __make_request()
1505 spin_lock_irqsave(&conf->device_lock, flags); in __make_request()
1506 bio_list_add(&conf->pending_bio_list, mbio); in __make_request()
1507 conf->pending_count++; in __make_request()
1508 spin_unlock_irqrestore(&conf->device_lock, flags); in __make_request()
1523 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); in __make_request()
1538 struct r10conf *conf = mddev->private; in make_request() local
1539 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in make_request()
1559 && (conf->geo.near_copies < conf->geo.raid_disks in make_request()
1560 || conf->prev.near_copies < in make_request()
1561 conf->prev.raid_disks))) { in make_request()
1575 wake_up(&conf->wait_barrier); in make_request()
1580 struct r10conf *conf = mddev->private; in status() local
1583 if (conf->geo.near_copies < conf->geo.raid_disks) in status()
1585 if (conf->geo.near_copies > 1) in status()
1586 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in status()
1587 if (conf->geo.far_copies > 1) { in status()
1588 if (conf->geo.far_offset) in status()
1589 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in status()
1591 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in status()
1593 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in status()
1594 conf->geo.raid_disks - mddev->degraded); in status()
1595 for (i = 0; i < conf->geo.raid_disks; i++) in status()
1597 conf->mirrors[i].rdev && in status()
1598 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); in status()
1607 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1613 disks = conf->prev.raid_disks; in _enough()
1614 ncopies = conf->prev.near_copies; in _enough()
1616 disks = conf->geo.raid_disks; in _enough()
1617 ncopies = conf->geo.near_copies; in _enough()
1622 int n = conf->copies; in _enough()
1628 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
1643 static int enough(struct r10conf *conf, int ignore) in enough() argument
1650 return _enough(conf, 0, ignore) && in enough()
1651 _enough(conf, 1, ignore); in enough()
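
_enough() above walks the copy-sets of the layout and reports failure if any set has lost every in-sync member, while enough() requires this for both the previous and the current geometry. A standalone approximation of that walk, with a hypothetical layout_enough() and a boolean in_sync array standing in for the rcu-protected rdev lookups, is:

#include <stdbool.h>

static bool layout_enough(const bool *in_sync, int disks, int ncopies,
			  int copies, int ignore)
{
	int first = 0;

	do {
		int cnt = 0, this = first;

		/* Count surviving members of this copy-set. */
		for (int n = copies; n > 0; n--, this = (this + 1) % disks)
			if (this != ignore && in_sync[this])
				cnt++;
		if (cnt == 0)
			return false;   /* some chunk has no readable copy left */
		first = (first + ncopies) % disks;
	} while (first != 0);

	return true;
}
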
1657 struct r10conf *conf = mddev->private; in error() local
1666 spin_lock_irqsave(&conf->device_lock, flags); in error()
1668 && !enough(conf, rdev->raid_disk)) { in error()
1672 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1684 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1689 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1692 static void print_conf(struct r10conf *conf) in print_conf() argument
1698 if (!conf) { in print_conf()
1702 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1703 conf->geo.raid_disks); in print_conf()
1705 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1707 tmp = conf->mirrors + i; in print_conf()
1716 static void close_sync(struct r10conf *conf) in close_sync() argument
1718 wait_barrier(conf); in close_sync()
1719 allow_barrier(conf); in close_sync()
1721 mempool_destroy(conf->r10buf_pool); in close_sync()
1722 conf->r10buf_pool = NULL; in close_sync()
1728 struct r10conf *conf = mddev->private; in raid10_spare_active() local
1737 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1738 tmp = conf->mirrors + i; in raid10_spare_active()
1765 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
1767 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
1769 print_conf(conf); in raid10_spare_active()
1775 struct r10conf *conf = mddev->private; in raid10_add_disk() local
1779 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1787 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
1799 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
1804 struct raid10_info *p = &conf->mirrors[mirror]; in raid10_add_disk()
1818 conf->fullsync = 1; in raid10_add_disk()
1832 conf->fullsync = 1; in raid10_add_disk()
1845 freeze_array(conf, 0); in raid10_add_disk()
1846 unfreeze_array(conf); in raid10_add_disk()
1853 print_conf(conf); in raid10_add_disk()
1859 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
1863 struct raid10_info *p = conf->mirrors + number; in raid10_remove_disk()
1865 print_conf(conf); in raid10_remove_disk()
1884 number < conf->geo.raid_disks && in raid10_remove_disk()
1885 enough(conf, -1)) { in raid10_remove_disk()
1915 print_conf(conf); in raid10_remove_disk()
1922 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
1929 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1938 &conf->mirrors[d].rdev->corrected_errors); in end_sync_read()
1943 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in end_sync_read()
1985 struct r10conf *conf = mddev->private; in end_sync_write() local
1993 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1995 rdev = conf->mirrors[d].replacement; in end_sync_write()
1997 rdev = conf->mirrors[d].rdev; in end_sync_write()
2038 struct r10conf *conf = mddev->private; in sync_request_write() local
2046 for (i=0; i<conf->copies; i++) in sync_request_write()
2050 if (i == conf->copies) in sync_request_write()
2058 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2114 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2116 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2118 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2119 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request_write()
2126 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2140 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2172 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2189 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2197 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2219 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2221 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2231 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2248 struct r10conf *conf = mddev->private; in recovery_request_write() local
2272 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2273 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2277 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2278 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2353 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2364 rdev = conf->mirrors[d].rdev; in fix_read_error()
2385 md_error(mddev, conf->mirrors[d].rdev); in fix_read_error()
2405 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2417 conf->tmppage, READ, false); in fix_read_error()
2424 if (sl == conf->copies) in fix_read_error()
2435 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2456 sl = conf->copies; in fix_read_error()
2459 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2470 s, conf->tmppage, WRITE) in fix_read_error()
2496 sl = conf->copies; in fix_read_error()
2499 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2509 s, conf->tmppage, in fix_read_error()
2553 struct r10conf *conf = mddev->private; in narrow_write_error() local
2554 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2611 struct r10conf *conf = mddev->private; in handle_read_error() local
2631 freeze_array(conf, 1); in handle_read_error()
2632 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2633 unfreeze_array(conf); in handle_read_error()
2640 rdev = read_balance(conf, r10_bio, &max_sectors); in handle_read_error()
2677 spin_lock_irq(&conf->device_lock); in handle_read_error()
2682 spin_unlock_irq(&conf->device_lock); in handle_read_error()
2685 r10_bio = mempool_alloc(conf->r10bio_pool, in handle_read_error()
2701 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2714 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2716 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2730 md_error(conf->mddev, rdev); in handle_write_completed()
2732 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2746 md_error(conf->mddev, rdev); in handle_write_completed()
2751 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2754 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2760 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2764 md_error(conf->mddev, rdev); in handle_write_completed()
2768 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2771 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2777 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2792 struct r10conf *conf = mddev->private; in raid10d() local
2793 struct list_head *head = &conf->retry_list; in raid10d()
2801 flush_pending_writes(conf); in raid10d()
2803 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2805 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2810 conf->nr_queued--; in raid10d()
2811 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2814 conf = mddev->private; in raid10d()
2817 handle_write_completed(conf, r10_bio); in raid10d()
2841 static int init_resync(struct r10conf *conf) in init_resync() argument
2847 BUG_ON(conf->r10buf_pool); in init_resync()
2848 conf->have_replacement = 0; in init_resync()
2849 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2850 if (conf->mirrors[i].replacement) in init_resync()
2851 conf->have_replacement = 1; in init_resync()
2852 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
2853 if (!conf->r10buf_pool) in init_resync()
2855 conf->next_resync = 0; in init_resync()
2894 struct r10conf *conf = mddev->private; in sync_request() local
2903 sector_t chunk_mask = conf->geo.chunk_mask; in sync_request()
2905 if (!conf->r10buf_pool) in sync_request()
2906 if (init_resync(conf)) in sync_request()
2919 conf->fullsync == 0) { in sync_request()
2940 end_reshape(conf); in sync_request()
2941 close_sync(conf); in sync_request()
2949 else for (i = 0; i < conf->geo.raid_disks; i++) { in sync_request()
2951 raid10_find_virt(conf, mddev->curr_resync, i); in sync_request()
2957 if ((!mddev->bitmap || conf->fullsync) in sync_request()
2958 && conf->have_replacement in sync_request()
2963 for (i = 0; i < conf->geo.raid_disks; i++) in sync_request()
2964 if (conf->mirrors[i].replacement) in sync_request()
2965 conf->mirrors[i].replacement in sync_request()
2969 conf->fullsync = 0; in sync_request()
2972 close_sync(conf); in sync_request()
2980 if (chunks_skipped >= conf->geo.raid_disks) { in sync_request()
2994 if (conf->geo.near_copies < conf->geo.raid_disks && in sync_request()
3019 for (i = 0 ; i < conf->geo.raid_disks; i++) { in sync_request()
3025 struct raid10_info *mirror = &conf->mirrors[i]; in sync_request()
3038 sect = raid10_find_virt(conf, sector_nr, i); in sync_request()
3055 !conf->fullsync) { in sync_request()
3063 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3065 raise_barrier(conf, rb2 != NULL); in sync_request()
3075 raid10_find_phys(conf, r10_bio); in sync_request()
3080 for (j = 0; j < conf->geo.raid_disks; j++) in sync_request()
3081 if (conf->mirrors[j].rdev == NULL || in sync_request()
3082 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { in sync_request()
3091 for (j=0; j<conf->copies;j++) { in sync_request()
3098 if (!conf->mirrors[d].rdev || in sync_request()
3099 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) in sync_request()
3103 rdev = conf->mirrors[d].rdev; in sync_request()
3132 for (k=0; k<conf->copies; k++) in sync_request()
3135 BUG_ON(k == conf->copies); in sync_request()
3186 if (j == conf->copies) { in sync_request()
3194 for (k = 0; k < conf->copies; k++) in sync_request()
3244 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in sync_request()
3252 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in sync_request()
3257 raise_barrier(conf, 0); in sync_request()
3258 conf->next_resync = sector_nr; in sync_request()
3263 raid10_find_phys(conf, r10_bio); in sync_request()
3266 for (i = 0; i < conf->copies; i++) { in sync_request()
3277 if (conf->mirrors[d].rdev == NULL || in sync_request()
3278 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) in sync_request()
3281 if (is_badblock(conf->mirrors[d].rdev, in sync_request()
3293 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3301 conf->mirrors[d].rdev->data_offset; in sync_request()
3302 bio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request()
3305 if (conf->mirrors[d].replacement == NULL || in sync_request()
3307 &conf->mirrors[d].replacement->flags)) in sync_request()
3316 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3323 conf->mirrors[d].replacement->data_offset; in sync_request()
3324 bio->bi_bdev = conf->mirrors[d].replacement->bdev; in sync_request()
3329 for (i=0; i<conf->copies; i++) { in sync_request()
3332 rdev_dec_pending(conf->mirrors[d].rdev, in sync_request()
3337 conf->mirrors[d].replacement, in sync_request()
3420 struct r10conf *conf = mddev->private; in raid10_size() local
3423 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3424 conf->prev.raid_disks); in raid10_size()
3426 sectors = conf->dev_sectors; in raid10_size()
3428 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3429 sector_div(size, conf->geo.far_copies); in raid10_size()
3431 sector_div(size, conf->geo.near_copies); in raid10_size()
3433 return size << conf->geo.chunk_shift; in raid10_size()
3436 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3443 size = size >> conf->geo.chunk_shift; in calc_sectors()
3444 sector_div(size, conf->geo.far_copies); in calc_sectors()
3445 size = size * conf->geo.raid_disks; in calc_sectors()
3446 sector_div(size, conf->geo.near_copies); in calc_sectors()
3449 size = size * conf->copies; in calc_sectors()
3454 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3456 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3458 if (conf->geo.far_offset) in calc_sectors()
3459 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3461 sector_div(size, conf->geo.far_copies); in calc_sectors()
3462 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
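
raid10_size() and calc_sectors() above convert between per-device sectors and the exported array capacity: whole chunks per device are divided among the far copies, multiplied by the number of disks, and divided by the near copies. A plain-C restatement of that arithmetic, with a hypothetical raid10_capacity() in place of the sector_div()-based kernel code, is:

#include <stdint.h>

static uint64_t raid10_capacity(uint64_t dev_sectors, unsigned int chunk_shift,
				unsigned int raid_disks, unsigned int near_copies,
				unsigned int far_copies)
{
	uint64_t chunks = dev_sectors >> chunk_shift;   /* whole chunks per device */

	chunks /= far_copies;                           /* far copies share each device */
	chunks = chunks * raid_disks / near_copies;     /* sum disks, drop near copies */
	return chunks << chunk_shift;                   /* back to sectors */
}

For example, 4 disks of 1 TiB in the default near=2 layout (far_copies = 1) export 2 TiB of usable space.
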
3510 struct r10conf *conf = NULL; in setup_conf() local
3531 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
3532 if (!conf) in setup_conf()
3536 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + in setup_conf()
3539 if (!conf->mirrors) in setup_conf()
3542 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3543 if (!conf->tmppage) in setup_conf()
3546 conf->geo = geo; in setup_conf()
3547 conf->copies = copies; in setup_conf()
3548 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, in setup_conf()
3549 r10bio_pool_free, conf); in setup_conf()
3550 if (!conf->r10bio_pool) in setup_conf()
3553 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3555 conf->prev = conf->geo; in setup_conf()
3556 conf->reshape_progress = MaxSector; in setup_conf()
3558 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3562 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3563 if (conf->prev.far_offset) in setup_conf()
3564 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
3567 conf->prev.stride = conf->dev_sectors; in setup_conf()
3569 conf->reshape_safe = conf->reshape_progress; in setup_conf()
3570 spin_lock_init(&conf->device_lock); in setup_conf()
3571 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3573 spin_lock_init(&conf->resync_lock); in setup_conf()
3574 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3576 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3577 if (!conf->thread) in setup_conf()
3580 conf->mddev = mddev; in setup_conf()
3581 return conf; in setup_conf()
3587 if (conf) { in setup_conf()
3588 if (conf->r10bio_pool) in setup_conf()
3589 mempool_destroy(conf->r10bio_pool); in setup_conf()
3590 kfree(conf->mirrors); in setup_conf()
3591 safe_put_page(conf->tmppage); in setup_conf()
3592 kfree(conf); in setup_conf()
3599 struct r10conf *conf; in run() local
3609 conf = setup_conf(mddev); in run()
3610 if (IS_ERR(conf)) in run()
3611 return PTR_ERR(conf); in run()
3612 mddev->private = conf; in run()
3614 conf = mddev->private; in run()
3615 if (!conf) in run()
3618 mddev->thread = conf->thread; in run()
3619 conf->thread = NULL; in run()
3627 if (conf->geo.raid_disks % conf->geo.near_copies) in run()
3628 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3631 (conf->geo.raid_disks / conf->geo.near_copies)); in run()
3641 if (disk_idx >= conf->geo.raid_disks && in run()
3642 disk_idx >= conf->prev.raid_disks) in run()
3644 disk = conf->mirrors + disk_idx; in run()
3685 if (!enough(conf, -1)) { in run()
3691 if (conf->reshape_progress != MaxSector) { in run()
3693 if (conf->geo.far_copies != 1 && in run()
3694 conf->geo.far_offset == 0) in run()
3696 if (conf->prev.far_copies != 1 && in run()
3697 conf->prev.far_offset == 0) in run()
3703 i < conf->geo.raid_disks in run()
3704 || i < conf->prev.raid_disks; in run()
3707 disk = conf->mirrors + i; in run()
3722 conf->fullsync = 1; in run()
3733 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3734 conf->geo.raid_disks); in run()
3738 mddev->dev_sectors = conf->dev_sectors; in run()
3744 int stripe = conf->geo.raid_disks * in run()
3751 stripe /= conf->geo.near_copies; in run()
3759 if (conf->reshape_progress != MaxSector) { in run()
3762 before_length = ((1 << conf->prev.chunk_shift) * in run()
3763 conf->prev.far_copies); in run()
3764 after_length = ((1 << conf->geo.chunk_shift) * in run()
3765 conf->geo.far_copies); in run()
3772 conf->offset_diff = min_offset_diff; in run()
3786 if (conf->r10bio_pool) in run()
3787 mempool_destroy(conf->r10bio_pool); in run()
3788 safe_put_page(conf->tmppage); in run()
3789 kfree(conf->mirrors); in run()
3790 kfree(conf); in run()
3798 struct r10conf *conf = priv; in raid10_free() local
3800 if (conf->r10bio_pool) in raid10_free()
3801 mempool_destroy(conf->r10bio_pool); in raid10_free()
3802 safe_put_page(conf->tmppage); in raid10_free()
3803 kfree(conf->mirrors); in raid10_free()
3804 kfree(conf->mirrors_old); in raid10_free()
3805 kfree(conf->mirrors_new); in raid10_free()
3806 kfree(conf); in raid10_free()
3811 struct r10conf *conf = mddev->private; in raid10_quiesce() local
3815 raise_barrier(conf, 0); in raid10_quiesce()
3818 lower_barrier(conf); in raid10_quiesce()
3837 struct r10conf *conf = mddev->private; in raid10_resize() local
3843 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
3864 calc_sectors(conf, sectors); in raid10_resize()
3865 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3873 struct r10conf *conf; in raid10_takeover_raid0() local
3893 conf = setup_conf(mddev); in raid10_takeover_raid0()
3894 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
3900 conf->barrier = 1; in raid10_takeover_raid0()
3903 return conf; in raid10_takeover_raid0()
3945 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
3948 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
3951 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3962 if (!enough(conf, -1)) in raid10_check_reshape()
3965 kfree(conf->mirrors_new); in raid10_check_reshape()
3966 conf->mirrors_new = NULL; in raid10_check_reshape()
3969 conf->mirrors_new = kzalloc( in raid10_check_reshape()
3974 if (!conf->mirrors_new) in raid10_check_reshape()
3993 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
4001 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
4002 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4013 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4017 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4018 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4027 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4053 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
4061 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4064 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
4065 conf->prev.far_copies); in raid10_start_reshape()
4066 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4067 conf->geo.far_copies); in raid10_start_reshape()
4091 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4092 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4093 if (conf->mirrors_new) { in raid10_start_reshape()
4094 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4095 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4097 kfree(conf->mirrors_old); in raid10_start_reshape()
4098 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4099 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4100 conf->mirrors_new = NULL; in raid10_start_reshape()
4102 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4107 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4113 conf->reshape_progress = size; in raid10_start_reshape()
4115 conf->reshape_progress = 0; in raid10_start_reshape()
4116 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4117 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4122 conf->geo.raid_disks), in raid10_start_reshape()
4133 conf->prev.raid_disks) in raid10_start_reshape()
4141 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4151 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4152 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4153 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4154 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4155 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4170 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4177 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4178 conf->geo = conf->prev; in raid10_start_reshape()
4179 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4183 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4184 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4186 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4261 struct r10conf *conf = mddev->private; in reshape_request() local
4276 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4278 - conf->reshape_progress); in reshape_request()
4280 conf->reshape_progress > 0) in reshape_request()
4281 sector_nr = conf->reshape_progress; in reshape_request()
4298 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4299 &conf->geo); in reshape_request()
4304 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4305 &conf->prev); in reshape_request()
4307 if (next + conf->offset_diff < safe) in reshape_request()
4310 last = conf->reshape_progress - 1; in reshape_request()
4311 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4312 & conf->prev.chunk_mask); in reshape_request()
4319 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4324 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4329 if (next > safe + conf->offset_diff) in reshape_request()
4332 sector_nr = conf->reshape_progress; in reshape_request()
4333 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4334 & conf->prev.chunk_mask); in reshape_request()
4341 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4343 wait_barrier(conf); in reshape_request()
4344 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4347 - conf->reshape_progress; in reshape_request()
4349 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4350 conf->reshape_checkpoint = jiffies; in reshape_request()
4356 allow_barrier(conf); in reshape_request()
4359 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4360 allow_barrier(conf); in reshape_request()
4365 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); in reshape_request()
4367 raise_barrier(conf, sectors_done != 0); in reshape_request()
4373 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4381 mempool_free(r10_bio, conf->r10buf_pool); in reshape_request()
4402 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4407 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4412 rdev2 = conf->mirrors[d].replacement; in reshape_request()
4415 rdev2 = conf->mirrors[d].rdev; in reshape_request()
4476 conf->reshape_progress -= sectors_done; in reshape_request()
4478 conf->reshape_progress += sectors_done; in reshape_request()
4493 struct r10conf *conf = mddev->private; in reshape_request_write() local
4507 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
4512 rdev = conf->mirrors[d].replacement; in reshape_request_write()
4515 rdev = conf->mirrors[d].rdev; in reshape_request_write()
4529 static void end_reshape(struct r10conf *conf) in end_reshape() argument
4531 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4534 spin_lock_irq(&conf->device_lock); in end_reshape()
4535 conf->prev = conf->geo; in end_reshape()
4536 md_finish_reshape(conf->mddev); in end_reshape()
4538 conf->reshape_progress = MaxSector; in end_reshape()
4539 conf->reshape_safe = MaxSector; in end_reshape()
4540 spin_unlock_irq(&conf->device_lock); in end_reshape()
4545 if (conf->mddev->queue) { in end_reshape()
4546 int stripe = conf->geo.raid_disks * in end_reshape()
4547 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4548 stripe /= conf->geo.near_copies; in end_reshape()
4549 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
4550 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
4552 conf->fullsync = 0; in end_reshape()
4560 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
4563 struct r10dev devs[conf->copies]; in handle_reshape_read_error()
4571 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
4583 struct md_rdev *rdev = conf->mirrors[d].rdev; in handle_reshape_read_error()
4600 if (slot >= conf->copies) in handle_reshape_read_error()
4622 struct r10conf *conf = mddev->private; in end_reshape_write() local
4628 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4630 rdev = conf->mirrors[d].replacement; in end_reshape_write()
4633 rdev = conf->mirrors[d].rdev; in end_reshape_write()
4656 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
4673 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4674 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4676 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_finish_reshape()
4679 rdev = conf->mirrors[d].replacement; in raid10_finish_reshape()
4685 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()