Lines matching refs: conf (struct r1conf references in drivers/md/raid1.c)
69 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
71 static void lower_barrier(struct r1conf *conf);
179 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
183 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
193 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
195 put_all_bios(conf, r1_bio); in free_r1bio()
196 mempool_free(r1_bio, conf->r1bio_pool); in free_r1bio()
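
put_all_bios() and free_r1bio() above encode a strict release order: every per-mirror bio slot is dropped before the containing r1bio goes back to its mempool. A minimal userspace sketch of that discipline, using illustrative stand-in types rather than the kernel API:

    #include <stdlib.h>

    /* Stand-ins for struct bio / struct r1bio; names are illustrative. */
    struct fake_bio { int in_use; };
    struct fake_r1bio {
        int ndisks;                 /* raid_disks * 2 in the real driver */
        struct fake_bio *bios[16];
    };

    static void put_all_fake_bios(struct fake_r1bio *rb)
    {
        for (int i = 0; i < rb->ndisks; i++) {
            if (rb->bios[i]) {
                rb->bios[i]->in_use = 0;  /* bio_put() analogue */
                rb->bios[i] = NULL;
            }
        }
    }

    static void free_fake_r1bio(struct fake_r1bio *rb)
    {
        put_all_fake_bios(rb);  /* release every leg first ... */
        free(rb);               /* ... then the container (mempool_free()) */
    }
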
201 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
204 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
207 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
210 mempool_free(r1_bio, conf->r1buf_pool); in put_buf()
212 lower_barrier(conf); in put_buf()
219 struct r1conf *conf = mddev->private; in reschedule_retry() local
221 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
222 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
223 conf->nr_queued++; in reschedule_retry()
224 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
226 wake_up(&conf->wait_barrier); in reschedule_retry()
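
reschedule_retry() is the producer half of the raid1d retry machinery: park the failed r1bio on conf->retry_list under device_lock, bump nr_queued, and wake the waiters. A pthread analogue of the same shape (the kernel additionally wakes the md thread; names here are illustrative):

    #include <pthread.h>

    struct work { struct work *next; };

    struct retry_queue {
        pthread_mutex_t lock;   /* device_lock analogue */
        pthread_cond_t  wait;   /* wait_barrier analogue */
        struct work *head;
        int nr_queued;
    };

    static void reschedule(struct retry_queue *q, struct work *w)
    {
        pthread_mutex_lock(&q->lock);
        w->next = q->head;              /* list_add(): push onto retry list */
        q->head = w;
        q->nr_queued++;
        pthread_mutex_unlock(&q->lock);
        pthread_cond_broadcast(&q->wait);
    }
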
239 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio() local
245 spin_lock_irqsave(&conf->device_lock, flags); in call_bio_endio()
248 spin_unlock_irqrestore(&conf->device_lock, flags); in call_bio_endio()
253 wake_up(&conf->wait_barrier); in call_bio_endio()
265 allow_barrier(conf, start_next_window, bi_sector); in call_bio_endio()
290 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
292 conf->mirrors[disk].head_position = in update_head_pos()
302 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
303 int raid_disks = conf->raid_disks; in find_bio_disk()
320 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
336 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
337 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
339 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) in raid1_end_read_request()
341 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
346 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); in raid1_end_read_request()
355 mdname(conf->mddev), in raid1_end_read_request()
356 bdevname(conf->mirrors[mirror].rdev->bdev, in raid1_end_read_request()
405 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
415 &conf->mirrors[mirror].rdev->flags); in raid1_end_write_request()
417 &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
419 conf->mddev->recovery); in raid1_end_write_request()
446 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) && in raid1_end_write_request()
447 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
451 if (is_badblock(conf->mirrors[mirror].rdev, in raid1_end_write_request()
460 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) in raid1_end_write_request()
484 rdev_dec_pending(conf->mirrors[mirror].rdev, in raid1_end_write_request()
485 conf->mddev); in raid1_end_write_request()
511 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
543 (mddev_is_clustered(conf->mddev) && in read_balance()
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, in read_balance()
550 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
557 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
618 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
624 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
627 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance()
687 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
695 rdev_dec_pending(rdev, conf->mddev); in read_balance()
700 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
701 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
703 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
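
The core of read_balance() is a seek-distance heuristic: keep a sequential stream on the disk already positioned there, otherwise pick the mirror whose head is closest to the target sector. Reduced to that heuristic alone (the real function also filters Faulty and missing rdevs, honors bad blocks via *max_sectors, and deprioritizes WriteMostly members):

    #include <stdlib.h>

    struct mirror_pos { long head_position, next_seq_sect; };

    static int pick_read_disk(const struct mirror_pos *m, int ndisks,
                              long sector)
    {
        int best = -1;
        long best_dist = 0;

        for (int d = 0; d < ndisks; d++) {
            if (m[d].next_seq_sect == sector)
                return d;               /* sequential: stay on this disk */
            long dist = labs(sector - m[d].head_position);
            if (best < 0 || dist < best_dist) {
                best = d;
                best_dist = dist;
            }
        }
        return best;                    /* -1 only if ndisks == 0 */
    }
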
715 struct r1conf *conf = mddev->private; in raid1_mergeable_bvec() local
722 for (disk = 0; disk < conf->raid_disks * 2; disk++) { in raid1_mergeable_bvec()
724 conf->mirrors[disk].rdev); in raid1_mergeable_bvec()
745 struct r1conf *conf = mddev->private; in raid1_congested() local
749 conf->pending_count >= max_queued_requests) in raid1_congested()
753 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_congested()
754 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_congested()
773 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
778 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
780 if (conf->pending_bio_list.head) { in flush_pending_writes()
782 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
783 conf->pending_count = 0; in flush_pending_writes()
784 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
787 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
788 wake_up(&conf->wait_barrier); in flush_pending_writes()
802 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
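
flush_pending_writes() uses the classic detach-then-drain shape: take the whole pending list while holding device_lock, zero the counter, drop the lock, and only then submit the bios. A userspace model of that pattern (the submit callback and types are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    struct pbio { struct pbio *next; };

    struct pending {
        pthread_mutex_t lock;
        struct pbio *head;
        int count;
    };

    static void flush_pending(struct pending *p,
                              void (*submit)(struct pbio *))
    {
        pthread_mutex_lock(&p->lock);
        struct pbio *bio = p->head;     /* bio_list_get(): take everything */
        p->head = NULL;
        p->count = 0;
        pthread_mutex_unlock(&p->lock); /* submit with the lock dropped */

        while (bio) {
            struct pbio *next = bio->next;
            submit(bio);
            bio = next;
        }
    }

Detaching first keeps the lock hold time constant regardless of queue depth, so writers appending to pending_bio_list are never blocked behind actual I/O submission.
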
826 static void raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
828 spin_lock_irq(&conf->resync_lock); in raise_barrier()
831 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, in raise_barrier()
832 conf->resync_lock); in raise_barrier()
835 conf->barrier++; in raise_barrier()
836 conf->next_resync = sector_nr; in raise_barrier()
847 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
848 !conf->array_frozen && in raise_barrier()
849 conf->barrier < RESYNC_DEPTH && in raise_barrier()
850 conf->current_window_requests == 0 && in raise_barrier()
851 (conf->start_next_window >= in raise_barrier()
852 conf->next_resync + RESYNC_SECTORS), in raise_barrier()
853 conf->resync_lock); in raise_barrier()
855 conf->nr_pending++; in raise_barrier()
856 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
859 static void lower_barrier(struct r1conf *conf) in lower_barrier() argument
862 BUG_ON(conf->barrier <= 0); in lower_barrier()
863 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
864 conf->barrier--; in lower_barrier()
865 conf->nr_pending--; in lower_barrier()
866 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
867 wake_up(&conf->wait_barrier); in lower_barrier()
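
raise_barrier() and lower_barrier() form a counting barrier between resync and normal I/O: each active resync range holds one barrier count (bounded by RESYNC_DEPTH), and already-queued normal I/O (nr_waiting) is allowed to drain before a new count is taken. A condition-variable model of just that counting, with the normal-I/O side and the resync-window checks omitted:

    #include <pthread.h>

    enum { RESYNC_DEPTH = 32 };     /* matches the driver's constant */

    struct barrier_state {
        pthread_mutex_t lock;   /* resync_lock analogue */
        pthread_cond_t  wait;   /* wait_barrier analogue */
        int barrier, nr_waiting, nr_pending;
    };

    static void raise_barrier_sketch(struct barrier_state *b)
    {
        pthread_mutex_lock(&b->lock);
        while (b->nr_waiting)           /* let queued normal I/O go first */
            pthread_cond_wait(&b->wait, &b->lock);
        b->barrier++;                   /* block new normal I/O */
        while (b->barrier >= RESYNC_DEPTH)
            pthread_cond_wait(&b->wait, &b->lock);
        b->nr_pending++;
        pthread_mutex_unlock(&b->lock);
    }

    static void lower_barrier_sketch(struct barrier_state *b)
    {
        pthread_mutex_lock(&b->lock);
        b->barrier--;
        b->nr_pending--;
        pthread_mutex_unlock(&b->lock);
        pthread_cond_broadcast(&b->wait);
    }
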
870 static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) in need_to_wait_for_sync() argument
874 if (conf->array_frozen || !bio) in need_to_wait_for_sync()
876 else if (conf->barrier && bio_data_dir(bio) == WRITE) { in need_to_wait_for_sync()
877 if ((conf->mddev->curr_resync_completed in need_to_wait_for_sync()
879 (conf->next_resync + NEXT_NORMALIO_DISTANCE in need_to_wait_for_sync()
889 static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) in wait_barrier() argument
893 spin_lock_irq(&conf->resync_lock); in wait_barrier()
894 if (need_to_wait_for_sync(conf, bio)) { in wait_barrier()
895 conf->nr_waiting++; in wait_barrier()
905 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
906 !conf->array_frozen && in wait_barrier()
907 (!conf->barrier || in wait_barrier()
908 ((conf->start_next_window < in wait_barrier()
909 conf->next_resync + RESYNC_SECTORS) && in wait_barrier()
912 conf->resync_lock); in wait_barrier()
913 conf->nr_waiting--; in wait_barrier()
918 conf->mddev->curr_resync_completed) { in wait_barrier()
919 if (conf->start_next_window == MaxSector) in wait_barrier()
920 conf->start_next_window = in wait_barrier()
921 conf->next_resync + in wait_barrier()
924 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) in wait_barrier()
926 conf->next_window_requests++; in wait_barrier()
928 conf->current_window_requests++; in wait_barrier()
929 sector = conf->start_next_window; in wait_barrier()
933 conf->nr_pending++; in wait_barrier()
934 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
938 static void allow_barrier(struct r1conf *conf, sector_t start_next_window, in allow_barrier() argument
943 spin_lock_irqsave(&conf->resync_lock, flags); in allow_barrier()
944 conf->nr_pending--; in allow_barrier()
946 if (start_next_window == conf->start_next_window) { in allow_barrier()
947 if (conf->start_next_window + NEXT_NORMALIO_DISTANCE in allow_barrier()
949 conf->next_window_requests--; in allow_barrier()
951 conf->current_window_requests--; in allow_barrier()
953 conf->current_window_requests--; in allow_barrier()
955 if (!conf->current_window_requests) { in allow_barrier()
956 if (conf->next_window_requests) { in allow_barrier()
957 conf->current_window_requests = in allow_barrier()
958 conf->next_window_requests; in allow_barrier()
959 conf->next_window_requests = 0; in allow_barrier()
960 conf->start_next_window += in allow_barrier()
963 conf->start_next_window = MaxSector; in allow_barrier()
966 spin_unlock_irqrestore(&conf->resync_lock, flags); in allow_barrier()
967 wake_up(&conf->wait_barrier); in allow_barrier()
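
The start_next_window fields that wait_barrier() and allow_barrier() juggle implement a sliding window that lets writes proceed concurrently with resync as long as they stay ahead of it: each write is charged to either the current or the next window, and when the current window drains, the next one slides into its place (MaxSector meaning "no window open"). A counter-only model of that bookkeeping, with an illustrative window size standing in for NEXT_NORMALIO_DISTANCE:

    typedef unsigned long long sector_t;
    #define MaxSector (~(sector_t)0)
    enum { WINDOW = 3072 };     /* illustrative, not the driver's value */

    struct window_state {
        sector_t start_next_window;
        long current_window_requests, next_window_requests;
    };

    /* wait_barrier() side: charge the write, remember the window start. */
    static sector_t charge_request(struct window_state *w, sector_t sector)
    {
        if (sector >= w->start_next_window + WINDOW)
            w->next_window_requests++;
        else
            w->current_window_requests++;
        return w->start_next_window;
    }

    /* allow_barrier() side: retire the write, slide the window when the
     * current one drains. */
    static void retire_request(struct window_state *w, sector_t start,
                               sector_t sector)
    {
        if (start == w->start_next_window && sector >= start + WINDOW)
            w->next_window_requests--;
        else
            w->current_window_requests--;

        if (!w->current_window_requests) {
            if (w->next_window_requests) {
                w->current_window_requests = w->next_window_requests;
                w->next_window_requests = 0;
                w->start_next_window += WINDOW;
            } else {
                w->start_next_window = MaxSector;
            }
        }
    }
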
970 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
983 spin_lock_irq(&conf->resync_lock); in freeze_array()
984 conf->array_frozen = 1; in freeze_array()
985 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
986 conf->nr_pending == conf->nr_queued+extra, in freeze_array()
987 conf->resync_lock, in freeze_array()
988 flush_pending_writes(conf)); in freeze_array()
989 spin_unlock_irq(&conf->resync_lock); in freeze_array()
991 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
994 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
995 conf->array_frozen = 0; in unfreeze_array()
996 wake_up(&conf->wait_barrier); in unfreeze_array()
997 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
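
freeze_array() quiesces the array: with array_frozen set, no new request gets past wait_barrier(), and the caller then waits until every in-flight request (nr_pending) has either completed or parked on the retry list (nr_queued), plus 'extra' for r1bios the caller itself still holds; that is why handle_read_error() below passes extra = 1. A pthread model of the wait condition (the kernel also flushes pending writes while waiting, which this sketch omits):

    #include <pthread.h>

    struct freeze_state {
        pthread_mutex_t lock;   /* resync_lock analogue */
        pthread_cond_t  wait;
        int frozen, nr_pending, nr_queued;
    };

    static void freeze_sketch(struct freeze_state *f, int extra)
    {
        pthread_mutex_lock(&f->lock);
        f->frozen = 1;
        while (f->nr_pending != f->nr_queued + extra)
            pthread_cond_wait(&f->wait, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }

    static void unfreeze_sketch(struct freeze_state *f)
    {
        pthread_mutex_lock(&f->lock);
        f->frozen = 0;
        pthread_cond_broadcast(&f->wait);
        pthread_mutex_unlock(&f->lock);
    }
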
1046 struct r1conf *conf = mddev->private; in raid1_unplug() local
1050 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1051 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1052 conf->pending_count += plug->pending_cnt; in raid1_unplug()
1053 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1054 wake_up(&conf->wait_barrier); in raid1_unplug()
1063 wake_up(&conf->wait_barrier); in raid1_unplug()
1081 struct r1conf *conf = mddev->private; in make_request() local
1122 prepare_to_wait(&conf->wait_barrier, in make_request()
1132 finish_wait(&conf->wait_barrier, &w); in make_request()
1135 start_next_window = wait_barrier(conf, bio); in make_request()
1144 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1169 rdisk = read_balance(conf, r1_bio, &max_sectors); in make_request()
1176 mirror = conf->mirrors + rdisk; in make_request()
1211 spin_lock_irq(&conf->device_lock); in make_request()
1216 spin_unlock_irq(&conf->device_lock); in make_request()
1224 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1241 if (conf->pending_count >= max_queued_requests) { in make_request()
1243 wait_event(conf->wait_barrier, in make_request()
1244 conf->pending_count < max_queued_requests); in make_request()
1257 disks = conf->raid_disks * 2; in make_request()
1264 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in make_request()
1273 if (i < conf->raid_disks) in make_request()
1332 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in make_request()
1334 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); in make_request()
1336 start_next_window = wait_barrier(conf, bio); in make_request()
1344 wait_event(conf->wait_barrier, in make_request()
1354 spin_lock_irq(&conf->device_lock); in make_request()
1359 spin_unlock_irq(&conf->device_lock); in make_request()
1401 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) in make_request()
1408 conf->mirrors[i].rdev->data_offset); in make_request()
1409 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; in make_request()
1422 spin_lock_irqsave(&conf->device_lock, flags); in make_request()
1427 bio_list_add(&conf->pending_bio_list, mbio); in make_request()
1428 conf->pending_count++; in make_request()
1430 spin_unlock_irqrestore(&conf->device_lock, flags); in make_request()
1442 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in make_request()
1454 wake_up(&conf->wait_barrier); in make_request()
1459 struct r1conf *conf = mddev->private; in status() local
1462 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in status()
1463 conf->raid_disks - mddev->degraded); in status()
1465 for (i = 0; i < conf->raid_disks; i++) { in status()
1466 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in status()
1477 struct r1conf *conf = mddev->private; in error() local
1487 && (conf->raid_disks - mddev->degraded) == 1) { in error()
1494 conf->recovery_disabled = mddev->recovery_disabled; in error()
1498 spin_lock_irqsave(&conf->device_lock, flags); in error()
1504 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
1514 mdname(mddev), conf->raid_disks - mddev->degraded); in error()
1517 static void print_conf(struct r1conf *conf) in print_conf() argument
1522 if (!conf) { in print_conf()
1526 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1527 conf->raid_disks); in print_conf()
1530 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1532 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1542 static void close_sync(struct r1conf *conf) in close_sync() argument
1544 wait_barrier(conf, NULL); in close_sync()
1545 allow_barrier(conf, 0, 0); in close_sync()
1547 mempool_destroy(conf->r1buf_pool); in close_sync()
1548 conf->r1buf_pool = NULL; in close_sync()
1550 spin_lock_irq(&conf->resync_lock); in close_sync()
1551 conf->next_resync = 0; in close_sync()
1552 conf->start_next_window = MaxSector; in close_sync()
1553 conf->current_window_requests += in close_sync()
1554 conf->next_window_requests; in close_sync()
1555 conf->next_window_requests = 0; in close_sync()
1556 spin_unlock_irq(&conf->resync_lock); in close_sync()
1562 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1573 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1574 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1575 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1576 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1605 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1607 print_conf(conf); in raid1_spare_active()
1613 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1618 int last = conf->raid_disks - 1; in raid1_add_disk()
1621 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1633 p = conf->mirrors+mirror; in raid1_add_disk()
1647 conf->fullsync = 1; in raid1_add_disk()
1652 p[conf->raid_disks].rdev == NULL) { in raid1_add_disk()
1658 conf->fullsync = 1; in raid1_add_disk()
1659 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1672 freeze_array(conf, 0); in raid1_add_disk()
1673 unfreeze_array(conf); in raid1_add_disk()
1679 print_conf(conf); in raid1_add_disk()
1685 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1688 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1691 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1693 print_conf(conf); in raid1_remove_disk()
1704 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1705 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1716 } else if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1722 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1723 freeze_array(conf, 0); in raid1_remove_disk()
1726 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1727 unfreeze_array(conf); in raid1_remove_disk()
1735 print_conf(conf); in raid1_remove_disk()
1762 struct r1conf *conf = mddev->private; in end_sync_write() local
1781 &conf->mirrors[mirror].rdev->flags); in end_sync_write()
1783 &conf->mirrors[mirror].rdev->flags)) in end_sync_write()
1787 } else if (is_badblock(conf->mirrors[mirror].rdev, in end_sync_write()
1791 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1843 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
1864 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1873 if (d == conf->raid_disks * 2) in fix_sync_read_error()
1890 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
1891 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1898 conf->recovery_disabled = in fix_sync_read_error()
1916 d = conf->raid_disks * 2; in fix_sync_read_error()
1920 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1931 d = conf->raid_disks * 2; in fix_sync_read_error()
1935 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
1960 struct r1conf *conf = mddev->private; in process_checks() local
1967 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
1982 conf->mirrors[i].rdev->data_offset; in process_checks()
1983 b->bi_bdev = conf->mirrors[i].rdev->bdev; in process_checks()
1999 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2003 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2007 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2036 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2046 struct r1conf *conf = mddev->private; in sync_request_write() local
2048 int disks = conf->raid_disks * 2; in sync_request_write()
2076 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2102 static void fix_read_error(struct r1conf *conf, int read_disk, in fix_read_error() argument
2105 struct mddev *mddev = conf->mddev; in fix_read_error()
2125 rdev = conf->mirrors[d].rdev; in fix_read_error()
2133 conf->tmppage, READ, false)) in fix_read_error()
2137 if (d == conf->raid_disks * 2) in fix_read_error()
2144 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2153 d = conf->raid_disks * 2; in fix_read_error()
2155 rdev = conf->mirrors[d].rdev; in fix_read_error()
2159 conf->tmppage, WRITE); in fix_read_error()
2165 d = conf->raid_disks * 2; in fix_read_error()
2167 rdev = conf->mirrors[d].rdev; in fix_read_error()
2171 conf->tmppage, READ)) { in fix_read_error()
2191 struct r1conf *conf = mddev->private; in narrow_write_error() local
2192 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2265 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2269 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2270 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2281 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2285 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2288 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2291 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2293 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2297 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2304 md_error(conf->mddev, in handle_write_finished()
2305 conf->mirrors[m].rdev); in handle_write_finished()
2309 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2310 conf->mddev); in handle_write_finished()
2317 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2321 struct mddev *mddev = conf->mddev; in handle_read_error()
2336 freeze_array(conf, 1); in handle_read_error()
2337 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2339 unfreeze_array(conf); in handle_read_error()
2341 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); in handle_read_error()
2342 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); in handle_read_error()
2347 disk = read_balance(conf, r1_bio, &max_sectors); in handle_read_error()
2366 rdev = conf->mirrors[disk].rdev; in handle_read_error()
2384 spin_lock_irq(&conf->device_lock); in handle_read_error()
2389 spin_unlock_irq(&conf->device_lock); in handle_read_error()
2393 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); in handle_read_error()
2414 struct r1conf *conf = mddev->private; in raid1d() local
2415 struct list_head *head = &conf->retry_list; in raid1d()
2423 flush_pending_writes(conf); in raid1d()
2425 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2427 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2432 conf->nr_queued--; in raid1d()
2433 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2436 conf = mddev->private; in raid1d()
2440 handle_sync_write_finished(conf, r1_bio); in raid1d()
2445 handle_write_finished(conf, r1_bio); in raid1d()
2447 handle_read_error(conf, r1_bio); in raid1d()
2461 static int init_resync(struct r1conf *conf) in init_resync() argument
2466 BUG_ON(conf->r1buf_pool); in init_resync()
2467 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, in init_resync()
2468 conf->poolinfo); in init_resync()
2469 if (!conf->r1buf_pool) in init_resync()
2471 conf->next_resync = 0; in init_resync()
2487 struct r1conf *conf = mddev->private; in sync_request() local
2500 if (!conf->r1buf_pool) in sync_request()
2501 if (init_resync(conf)) in sync_request()
2515 conf->fullsync = 0; in sync_request()
2518 close_sync(conf); in sync_request()
2525 conf->fullsync == 0) { in sync_request()
2533 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in sync_request()
2540 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); in sync_request()
2542 raise_barrier(conf, sector_nr); in sync_request()
2559 for (i = 0; i < conf->raid_disks * 2; i++) { in sync_request()
2564 rdev = rcu_dereference(conf->mirrors[i].rdev); in sync_request()
2567 if (i < conf->raid_disks) in sync_request()
2631 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in sync_request()
2633 struct md_rdev *rdev = conf->mirrors[i].rdev; in sync_request()
2648 conf->recovery_disabled = mddev->recovery_disabled; in sync_request()
2694 !conf->fullsync && in sync_request()
2702 for (i = 0 ; i < conf->raid_disks * 2; i++) { in sync_request()
2735 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in sync_request()
2763 struct r1conf *conf; in setup_conf() local
2769 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
2770 if (!conf) in setup_conf()
2773 conf->mirrors = kzalloc(sizeof(struct raid1_info) in setup_conf()
2776 if (!conf->mirrors) in setup_conf()
2779 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
2780 if (!conf->tmppage) in setup_conf()
2783 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
2784 if (!conf->poolinfo) in setup_conf()
2786 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2787 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, in setup_conf()
2789 conf->poolinfo); in setup_conf()
2790 if (!conf->r1bio_pool) in setup_conf()
2793 conf->poolinfo->mddev = mddev; in setup_conf()
2796 spin_lock_init(&conf->device_lock); in setup_conf()
2804 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
2806 disk = conf->mirrors + disk_idx; in setup_conf()
2818 conf->raid_disks = mddev->raid_disks; in setup_conf()
2819 conf->mddev = mddev; in setup_conf()
2820 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
2822 spin_lock_init(&conf->resync_lock); in setup_conf()
2823 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
2825 bio_list_init(&conf->pending_bio_list); in setup_conf()
2826 conf->pending_count = 0; in setup_conf()
2827 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
2829 conf->start_next_window = MaxSector; in setup_conf()
2830 conf->current_window_requests = conf->next_window_requests = 0; in setup_conf()
2833 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
2835 disk = conf->mirrors + i; in setup_conf()
2837 if (i < conf->raid_disks && in setup_conf()
2838 disk[conf->raid_disks].rdev) { in setup_conf()
2845 disk[conf->raid_disks].rdev; in setup_conf()
2846 disk[conf->raid_disks].rdev = NULL; in setup_conf()
2857 conf->fullsync = 1; in setup_conf()
2862 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
2863 if (!conf->thread) { in setup_conf()
2870 return conf; in setup_conf()
2873 if (conf) { in setup_conf()
2874 if (conf->r1bio_pool) in setup_conf()
2875 mempool_destroy(conf->r1bio_pool); in setup_conf()
2876 kfree(conf->mirrors); in setup_conf()
2877 safe_put_page(conf->tmppage); in setup_conf()
2878 kfree(conf->poolinfo); in setup_conf()
2879 kfree(conf); in setup_conf()
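
The setup_conf() tail above is the kernel's standard construct-or-unwind shape: allocate each piece in turn, jump to a single abort label on any failure, and free there in reverse with NULL-safe calls so a partial build tears down cleanly. A userspace reduction of the same shape, with the types trimmed to illustrate the pattern:

    #include <stdlib.h>

    struct mirror   { void *rdev; };
    struct poolinfo { int raid_disks; };

    struct conf {
        struct mirror *mirrors;
        struct poolinfo *poolinfo;
    };

    static struct conf *setup_conf_sketch(int raid_disks)
    {
        struct conf *conf = calloc(1, sizeof(*conf)); /* kzalloc() analogue */
        if (!conf)
            goto abort;

        conf->mirrors = calloc((size_t)raid_disks * 2,
                               sizeof(*conf->mirrors));
        if (!conf->mirrors)
            goto abort;

        conf->poolinfo = calloc(1, sizeof(*conf->poolinfo));
        if (!conf->poolinfo)
            goto abort;
        conf->poolinfo->raid_disks = raid_disks * 2;

        return conf;

    abort:
        if (conf) {
            free(conf->poolinfo);   /* free(NULL) is a no-op */
            free(conf->mirrors);
            free(conf);
        }
        return NULL;
    }
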
2887 struct r1conf *conf; in run() local
2909 conf = setup_conf(mddev); in run()
2911 conf = mddev->private; in run()
2913 if (IS_ERR(conf)) in run()
2914 return PTR_ERR(conf); in run()
2929 for (i = 0; i < conf->raid_disks; i++) in run()
2930 if (conf->mirrors[i].rdev == NULL || in run()
2931 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in run()
2932 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in run()
2935 if (conf->raid_disks - mddev->degraded == 1) in run()
2950 mddev->thread = conf->thread; in run()
2951 conf->thread = NULL; in run()
2952 mddev->private = conf; in run()
2968 raid1_free(mddev, conf); in run()
2975 struct r1conf *conf = priv; in raid1_free() local
2977 if (conf->r1bio_pool) in raid1_free()
2978 mempool_destroy(conf->r1bio_pool); in raid1_free()
2979 kfree(conf->mirrors); in raid1_free()
2980 safe_put_page(conf->tmppage); in raid1_free()
2981 kfree(conf->poolinfo); in raid1_free()
2982 kfree(conf); in raid1_free()
3032 struct r1conf *conf = mddev->private; in raid1_reshape() local
3053 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3055 for (d = 0; d < conf->raid_disks; d++) in raid1_reshape()
3056 if (conf->mirrors[d].rdev) in raid1_reshape()
3082 freeze_array(conf, 0); in raid1_reshape()
3085 oldpool = conf->r1bio_pool; in raid1_reshape()
3086 conf->r1bio_pool = newpool; in raid1_reshape()
3088 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3089 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3102 kfree(conf->mirrors); in raid1_reshape()
3103 conf->mirrors = newmirrors; in raid1_reshape()
3104 kfree(conf->poolinfo); in raid1_reshape()
3105 conf->poolinfo = newpoolinfo; in raid1_reshape()
3107 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3108 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3109 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3110 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3113 unfreeze_array(conf); in raid1_reshape()
3124 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3128 wake_up(&conf->wait_barrier); in raid1_quiesce()
3131 freeze_array(conf, 0); in raid1_quiesce()
3134 unfreeze_array(conf); in raid1_quiesce()
3145 struct r1conf *conf; in raid1_takeover() local
3149 conf = setup_conf(mddev); in raid1_takeover()
3150 if (!IS_ERR(conf)) in raid1_takeover()
3152 conf->array_frozen = 1; in raid1_takeover()
3153 return conf; in raid1_takeover()