Lines matching refs:rdev in drivers/md/raid10.c

366 struct md_rdev *rdev; in raid10_end_read_request() local
371 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
395 rdev->raid_disk)) in raid10_end_read_request()
400 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
409 bdevname(rdev->bdev, b), in raid10_end_read_request()
449 struct md_rdev *rdev = NULL; in raid10_end_write_request() local
454 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
455 if (!rdev) { in raid10_end_write_request()
458 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
468 md_error(rdev->mddev, rdev); in raid10_end_write_request()
470 set_bit(WriteErrorSeen, &rdev->flags); in raid10_end_write_request()
471 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in raid10_end_write_request()
473 &rdev->mddev->recovery); in raid10_end_write_request()
498 if (test_bit(In_sync, &rdev->flags) && in raid10_end_write_request()
499 !test_bit(Faulty, &rdev->flags)) in raid10_end_write_request()
503 if (is_badblock(rdev, in raid10_end_write_request()
524 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
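
Source lines 454-458 above show a lookup idiom raid10 repeats in several completion paths: try the replacement device first, then fall back to the primary rdev for the slot. A minimal sketch of the idiom, assuming the repl flag and the smp_rmb() placement from mainline raid10.c rather than quoting them:

	/* Sketch: prefer the replacement if this bio was routed to it,
	 * otherwise fall back to the primary rdev for this slot. */
	struct md_rdev *rdev = NULL;

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();	/* order against the writer installing ->replacement */
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
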
731 struct md_rdev *rdev = rcu_dereference( in raid10_mergeable_bvec() local
732 conf->mirrors[disk].rdev); in raid10_mergeable_bvec()
733 if (rdev && !test_bit(Faulty, &rdev->flags)) { in raid10_mergeable_bvec()
735 bdev_get_queue(rdev->bdev); in raid10_mergeable_bvec()
738 + rdev->data_offset; in raid10_mergeable_bvec()
739 bvm->bi_bdev = rdev->bdev; in raid10_mergeable_bvec()
744 rdev = rcu_dereference(conf->mirrors[disk].replacement); in raid10_mergeable_bvec()
745 if (rdev && !test_bit(Faulty, &rdev->flags)) { in raid10_mergeable_bvec()
747 bdev_get_queue(rdev->bdev); in raid10_mergeable_bvec()
750 + rdev->data_offset; in raid10_mergeable_bvec()
751 bvm->bi_bdev = rdev->bdev; in raid10_mergeable_bvec()
790 struct md_rdev *best_rdev, *rdev = NULL; in read_balance() local
822 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
823 if (rdev == NULL || test_bit(Faulty, &rdev->flags) || in read_balance()
824 test_bit(Unmerged, &rdev->flags) || in read_balance()
825 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
826 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
827 if (rdev == NULL || in read_balance()
828 test_bit(Faulty, &rdev->flags) || in read_balance()
829 test_bit(Unmerged, &rdev->flags)) in read_balance()
831 if (!test_bit(In_sync, &rdev->flags) && in read_balance()
832 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
836 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
857 best_rdev = rdev; in read_balance()
874 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) in read_balance()
886 best_rdev = rdev; in read_balance()
891 rdev = best_rdev; in read_balance()
895 atomic_inc(&rdev->nr_pending); in read_balance()
896 if (test_bit(Faulty, &rdev->flags)) { in read_balance()
900 rdev_dec_pending(rdev, conf->mddev); in read_balance()
905 rdev = NULL; in read_balance()
909 return rdev; in read_balance()
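
Source lines 895-900 capture the lock-free claim pattern read_balance() ends with: take a reference first, then re-test Faulty, and back out if the device failed in the window between the RCU lookup and the claim. Sketched below; the retry label is assumed, since the quoted lines elide it:

	atomic_inc(&rdev->nr_pending);
	if (test_bit(Faulty, &rdev->flags)) {
		/* Device failed after we chose it but before we pinned it:
		 * drop the reference and redo the whole selection. */
		rdev_dec_pending(rdev, conf->mddev);
		goto retry;	/* label assumed, not in the quoted lines */
	}
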
926 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_congested() local
927 if (rdev && !test_bit(Faulty, &rdev->flags)) { in raid10_congested()
928 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid10_congested()
1091 struct md_rdev *rdev) in choose_data_offset() argument
1093 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1095 return rdev->data_offset; in choose_data_offset()
1097 return rdev->new_data_offset; in choose_data_offset()
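
Source lines 1091-1097 give choose_data_offset() nearly whole. Reconstructed below; the condition elided at source line 1094 is assumed to be the R10BIO_Previous test used in mainline, so treat that line as an assumption:

	static sector_t choose_data_offset(struct r10bio *r10_bio,
					   struct md_rdev *rdev)
	{
		/* Outside a reshape, or for I/O mapped with the previous
		 * geometry, use the old offset; otherwise the new one. */
		if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
		    test_bit(R10BIO_Previous, &r10_bio->state))	/* assumed */
			return rdev->data_offset;
		else
			return rdev->new_data_offset;
	}
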
1226 struct md_rdev *rdev; in __make_request() local
1230 rdev = read_balance(conf, r10_bio, &max_sectors); in __make_request()
1231 if (!rdev) { in __make_request()
1242 r10_bio->devs[slot].rdev = rdev; in __make_request()
1245 choose_data_offset(r10_bio, rdev); in __make_request()
1246 read_bio->bi_bdev = rdev->bdev; in __make_request()
1314 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in __make_request() local
1317 if (rdev == rrdev) in __make_request()
1319 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { in __make_request()
1320 atomic_inc(&rdev->nr_pending); in __make_request()
1321 blocked_rdev = rdev; in __make_request()
1329 if (rdev && (test_bit(Faulty, &rdev->flags) in __make_request()
1330 || test_bit(Unmerged, &rdev->flags))) in __make_request()
1331 rdev = NULL; in __make_request()
1339 if (!rdev && !rrdev) { in __make_request()
1343 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { in __make_request()
1349 is_bad = is_badblock(rdev, dev_sector, in __make_request()
1356 atomic_inc(&rdev->nr_pending); in __make_request()
1357 set_bit(BlockedBadBlocks, &rdev->flags); in __make_request()
1358 blocked_rdev = rdev; in __make_request()
1385 if (rdev) { in __make_request()
1387 atomic_inc(&rdev->nr_pending); in __make_request()
1404 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in __make_request()
1407 struct md_rdev *rdev; in __make_request() local
1409 rdev = conf->mirrors[d].replacement; in __make_request()
1410 if (!rdev) { in __make_request()
1413 rdev = conf->mirrors[d].rdev; in __make_request()
1415 rdev_dec_pending(rdev, mddev); in __make_request()
1446 struct md_rdev *rdev = conf->mirrors[d].rdev; in __make_request() local
1454 rdev)); in __make_request()
1455 mbio->bi_bdev = rdev->bdev; in __make_request()
1484 struct md_rdev *rdev = conf->mirrors[d].replacement; in __make_request() local
1485 if (rdev == NULL) { in __make_request()
1488 rdev = conf->mirrors[d].rdev; in __make_request()
1497 r10_bio, rdev)); in __make_request()
1498 mbio->bi_bdev = rdev->bdev; in __make_request()
1597 conf->mirrors[i].rdev && in status()
1598 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); in status()
1626 struct md_rdev *rdev; in _enough() local
1628 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
1629 test_bit(In_sync, &rdev->flags)) in _enough()
1654 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
1667 if (test_bit(In_sync, &rdev->flags) in error()
1668 && !enough(conf, rdev->raid_disk)) { in error()
1675 if (test_and_clear_bit(In_sync, &rdev->flags)) in error()
1681 set_bit(Blocked, &rdev->flags); in error()
1682 set_bit(Faulty, &rdev->flags); in error()
1688 mdname(mddev), bdevname(rdev->bdev, b), in error()
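
Source lines 1654-1688 trace error(): never fail the last usable copy; otherwise clear In_sync, bump the degraded count, and mark the device Blocked and Faulty. A condensed sketch of that control flow, with the device_lock around the degraded update assumed from mainline:

	static void error(struct mddev *mddev, struct md_rdev *rdev)
	{
		struct r10conf *conf = mddev->private;

		/* Failing the only In_sync copy of some stripe would lose
		 * data: let the I/O error propagate instead. */
		if (test_bit(In_sync, &rdev->flags) &&
		    !enough(conf, rdev->raid_disk))
			return;
		if (test_and_clear_bit(In_sync, &rdev->flags))
			mddev->degraded++;	/* under conf->device_lock in mainline */
		set_bit(Blocked, &rdev->flags);
		set_bit(Faulty, &rdev->flags);
	}
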
1708 if (tmp->rdev) in print_conf()
1710 i, !test_bit(In_sync, &tmp->rdev->flags), in print_conf()
1711 !test_bit(Faulty, &tmp->rdev->flags), in print_conf()
1712 bdevname(tmp->rdev->bdev,b)); in print_conf()
1744 if (!tmp->rdev in raid10_spare_active()
1745 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) in raid10_spare_active()
1747 if (tmp->rdev) { in raid10_spare_active()
1752 set_bit(Faulty, &tmp->rdev->flags); in raid10_spare_active()
1754 tmp->rdev->sysfs_state); in raid10_spare_active()
1757 } else if (tmp->rdev in raid10_spare_active()
1758 && tmp->rdev->recovery_offset == MaxSector in raid10_spare_active()
1759 && !test_bit(Faulty, &tmp->rdev->flags) in raid10_spare_active()
1760 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { in raid10_spare_active()
1762 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); in raid10_spare_active()
1773 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
1780 struct request_queue *q = bdev_get_queue(rdev->bdev); in raid10_add_disk()
1787 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
1790 if (rdev->raid_disk >= 0) in raid10_add_disk()
1791 first = last = rdev->raid_disk; in raid10_add_disk()
1794 set_bit(Unmerged, &rdev->flags); in raid10_add_disk()
1798 if (rdev->saved_raid_disk >= first && in raid10_add_disk()
1799 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
1800 mirror = rdev->saved_raid_disk; in raid10_add_disk()
1807 if (p->rdev) { in raid10_add_disk()
1808 if (!test_bit(WantReplacement, &p->rdev->flags) || in raid10_add_disk()
1811 clear_bit(In_sync, &rdev->flags); in raid10_add_disk()
1812 set_bit(Replacement, &rdev->flags); in raid10_add_disk()
1813 rdev->raid_disk = mirror; in raid10_add_disk()
1816 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1817 rdev->data_offset << 9); in raid10_add_disk()
1819 rcu_assign_pointer(p->replacement, rdev); in raid10_add_disk()
1824 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1825 rdev->data_offset << 9); in raid10_add_disk()
1829 rdev->raid_disk = mirror; in raid10_add_disk()
1831 if (rdev->saved_raid_disk != mirror) in raid10_add_disk()
1833 rcu_assign_pointer(p->rdev, rdev); in raid10_add_disk()
1836 if (err == 0 && test_bit(Unmerged, &rdev->flags)) { in raid10_add_disk()
1847 clear_bit(Unmerged, &rdev->flags); in raid10_add_disk()
1849 md_integrity_add_rdev(rdev, mddev); in raid10_add_disk()
1850 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
1857 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
1861 int number = rdev->raid_disk; in raid10_remove_disk()
1866 if (rdev == p->rdev) in raid10_remove_disk()
1867 rdevp = &p->rdev; in raid10_remove_disk()
1868 else if (rdev == p->replacement) in raid10_remove_disk()
1873 if (test_bit(In_sync, &rdev->flags) || in raid10_remove_disk()
1874 atomic_read(&rdev->nr_pending)) { in raid10_remove_disk()
1881 if (!test_bit(Faulty, &rdev->flags) && in raid10_remove_disk()
1883 (!p->replacement || p->replacement == rdev) && in raid10_remove_disk()
1891 if (atomic_read(&rdev->nr_pending)) { in raid10_remove_disk()
1894 *rdevp = rdev; in raid10_remove_disk()
1898 p->rdev = p->replacement; in raid10_remove_disk()
1904 clear_bit(WantReplacement, &rdev->flags); in raid10_remove_disk()
1909 clear_bit(WantReplacement, &rdev->flags); in raid10_remove_disk()
1938 &conf->mirrors[d].rdev->corrected_errors); in end_sync_read()
1943 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in end_sync_read()
1991 struct md_rdev *rdev = NULL; in end_sync_write() local
1995 rdev = conf->mirrors[d].replacement; in end_sync_write()
1997 rdev = conf->mirrors[d].rdev; in end_sync_write()
2001 md_error(mddev, rdev); in end_sync_write()
2003 set_bit(WriteErrorSeen, &rdev->flags); in end_sync_write()
2004 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in end_sync_write()
2006 &rdev->mddev->recovery); in end_sync_write()
2009 } else if (is_badblock(rdev, in end_sync_write()
2015 rdev_dec_pending(rdev, mddev); in end_sync_write()
2114 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2116 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2118 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2119 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request_write()
2182 struct md_rdev *rdev; in fix_recovery_read_error() local
2189 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2191 ok = sync_page_io(rdev, in fix_recovery_read_error()
2197 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2199 ok = sync_page_io(rdev, in fix_recovery_read_error()
2205 set_bit(WriteErrorSeen, &rdev->flags); in fix_recovery_read_error()
2207 &rdev->flags)) in fix_recovery_read_error()
2209 &rdev->mddev->recovery); in fix_recovery_read_error()
2217 rdev_set_badblocks(rdev, addr, s, 0); in fix_recovery_read_error()
2219 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2221 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2272 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2273 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2290 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2294 unsigned int read_errors = atomic_read(&rdev->read_errors); in check_decay_read_errors()
2298 if (rdev->last_read_error.tv_sec == 0 && in check_decay_read_errors()
2299 rdev->last_read_error.tv_nsec == 0) { in check_decay_read_errors()
2301 rdev->last_read_error = cur_time_mon; in check_decay_read_errors()
2306 rdev->last_read_error.tv_sec) / 3600; in check_decay_read_errors()
2308 rdev->last_read_error = cur_time_mon; in check_decay_read_errors()
2316 atomic_set(&rdev->read_errors, 0); in check_decay_read_errors()
2318 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); in check_decay_read_errors()
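
Source lines 2290-2318 show the read-error accounting: before a new error is charged, the stored count is halved once for every hour since the previous error, so only a sustained error rate can ever reach max_read_errors. The decay step, sketched from the quoted fragments; the overflow guard is assumed from mainline:

	/* Halve the accumulated error count once per elapsed hour. */
	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;
	rdev->last_read_error = cur_time_mon;

	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);	/* shift would overflow */
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
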
2321 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, in r10_sync_page_io() argument
2327 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) in r10_sync_page_io()
2328 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) in r10_sync_page_io()
2330 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) in r10_sync_page_io()
2334 set_bit(WriteErrorSeen, &rdev->flags); in r10_sync_page_io()
2335 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in r10_sync_page_io()
2337 &rdev->mddev->recovery); in r10_sync_page_io()
2340 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
2341 md_error(rdev->mddev, rdev); in r10_sync_page_io()
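
Source lines 2321-2341 outline r10_sync_page_io(), the helper fix_read_error() retries through: skip regions already known bad, attempt the synchronous I/O, and on a failed write escalate to bad-block recording or md_error(). Reconstructed from those fragments; the return values are assumptions consistent with how the callers test the result:

	static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
				    int sectors, struct page *page, int rw)
	{
		sector_t first_bad;
		int bad_sectors;

		/* Don't touch regions already marked bad; after a write
		 * error, don't trust the device for writes at all. */
		if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
		    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
			return -1;
		if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
			return 1;	/* success */
		if (rw == WRITE) {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
		}
		/* Record the failure against the block, or the whole device. */
		if (!rdev_set_badblocks(rdev, sector, sectors, 0))
			md_error(rdev->mddev, rdev);
		return 0;
	}
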
2357 struct md_rdev *rdev; in fix_read_error() local
2364 rdev = conf->mirrors[d].rdev; in fix_read_error()
2366 if (test_bit(Faulty, &rdev->flags)) in fix_read_error()
2371 check_decay_read_errors(mddev, rdev); in fix_read_error()
2372 atomic_inc(&rdev->read_errors); in fix_read_error()
2373 if (atomic_read(&rdev->read_errors) > max_read_errors) { in fix_read_error()
2375 bdevname(rdev->bdev, b); in fix_read_error()
2381 atomic_read(&rdev->read_errors), max_read_errors); in fix_read_error()
2385 md_error(mddev, conf->mirrors[d].rdev); in fix_read_error()
2405 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2406 if (rdev && in fix_read_error()
2407 !test_bit(Unmerged, &rdev->flags) && in fix_read_error()
2408 test_bit(In_sync, &rdev->flags) && in fix_read_error()
2409 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2411 atomic_inc(&rdev->nr_pending); in fix_read_error()
2413 success = sync_page_io(rdev, in fix_read_error()
2418 rdev_dec_pending(rdev, mddev); in fix_read_error()
2435 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2438 rdev, in fix_read_error()
2442 md_error(mddev, rdev); in fix_read_error()
2459 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2460 if (!rdev || in fix_read_error()
2461 test_bit(Unmerged, &rdev->flags) || in fix_read_error()
2462 !test_bit(In_sync, &rdev->flags)) in fix_read_error()
2465 atomic_inc(&rdev->nr_pending); in fix_read_error()
2467 if (r10_sync_page_io(rdev, in fix_read_error()
2481 rdev)), in fix_read_error()
2482 bdevname(rdev->bdev, b)); in fix_read_error()
2486 bdevname(rdev->bdev, b)); in fix_read_error()
2488 rdev_dec_pending(rdev, mddev); in fix_read_error()
2499 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2500 if (!rdev || in fix_read_error()
2501 !test_bit(In_sync, &rdev->flags)) in fix_read_error()
2504 atomic_inc(&rdev->nr_pending); in fix_read_error()
2506 switch (r10_sync_page_io(rdev, in fix_read_error()
2520 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2521 bdevname(rdev->bdev, b)); in fix_read_error()
2525 bdevname(rdev->bdev, b)); in fix_read_error()
2534 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2535 bdevname(rdev->bdev, b)); in fix_read_error()
2536 atomic_add(s, &rdev->corrected_errors); in fix_read_error()
2539 rdev_dec_pending(rdev, mddev); in fix_read_error()
2554 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error() local
2572 if (rdev->badblocks.shift < 0) in narrow_write_error()
2575 block_sectors = roundup(1 << rdev->badblocks.shift, in narrow_write_error()
2576 bdev_logical_block_size(rdev->bdev) >> 9); in narrow_write_error()
2590 choose_data_offset(r10_bio, rdev) + in narrow_write_error()
2592 wbio->bi_bdev = rdev->bdev; in narrow_write_error()
2595 ok = rdev_set_badblocks(rdev, sector, in narrow_write_error()
2612 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error() local
2637 rdev_dec_pending(rdev, mddev); in handle_read_error()
2640 rdev = read_balance(conf, r10_bio, &max_sectors); in handle_read_error()
2641 if (rdev == NULL) { in handle_read_error()
2657 bdevname(rdev->bdev, b), in handle_read_error()
2663 r10_bio->devs[slot].rdev = rdev; in handle_read_error()
2665 + choose_data_offset(r10_bio, rdev); in handle_read_error()
2666 bio->bi_bdev = rdev->bdev; in handle_read_error()
2710 struct md_rdev *rdev; in handle_write_completed() local
2716 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2722 rdev, in handle_write_completed()
2727 rdev, in handle_write_completed()
2730 md_error(conf->mddev, rdev); in handle_write_completed()
2732 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2738 rdev, in handle_write_completed()
2743 rdev, in handle_write_completed()
2746 md_error(conf->mddev, rdev); in handle_write_completed()
2754 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2757 rdev, in handle_write_completed()
2760 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2764 md_error(conf->mddev, rdev); in handle_write_completed()
2768 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2771 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2772 if (rdev && bio == IO_MADE_GOOD) { in handle_write_completed()
2774 rdev, in handle_write_completed()
2777 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3027 if ((mirror->rdev == NULL || in sync_request()
3028 test_bit(In_sync, &mirror->rdev->flags)) in sync_request()
3081 if (conf->mirrors[j].rdev == NULL || in sync_request()
3082 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { in sync_request()
3095 struct md_rdev *rdev; in sync_request() local
3098 if (!conf->mirrors[d].rdev || in sync_request()
3099 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) in sync_request()
3103 rdev = conf->mirrors[d].rdev; in sync_request()
3106 if (is_badblock(rdev, sector, max_sync, in sync_request()
3127 rdev->data_offset; in sync_request()
3128 bio->bi_bdev = rdev->bdev; in sync_request()
3129 atomic_inc(&rdev->nr_pending); in sync_request()
3142 rdev = mirror->rdev; in sync_request()
3143 if (!test_bit(In_sync, &rdev->flags)) { in sync_request()
3152 + rdev->data_offset; in sync_request()
3153 bio->bi_bdev = rdev->bdev; in sync_request()
3162 rdev = mirror->replacement; in sync_request()
3171 if (rdev == NULL || bio == NULL || in sync_request()
3172 test_bit(Faulty, &rdev->flags)) in sync_request()
3181 rdev->data_offset; in sync_request()
3182 bio->bi_bdev = rdev->bdev; in sync_request()
3198 &mirror->rdev->flags) in sync_request()
3200 mirror->rdev, in sync_request()
3277 if (conf->mirrors[d].rdev == NULL || in sync_request()
3278 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) in sync_request()
3281 if (is_badblock(conf->mirrors[d].rdev, in sync_request()
3293 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3301 conf->mirrors[d].rdev->data_offset; in sync_request()
3302 bio->bi_bdev = conf->mirrors[d].rdev->bdev; in sync_request()
3316 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request()
3332 rdev_dec_pending(conf->mirrors[d].rdev, in sync_request()
3602 struct md_rdev *rdev; in run() local
3634 rdev_for_each(rdev, mddev) { in run()
3638 disk_idx = rdev->raid_disk; in run()
3646 if (test_bit(Replacement, &rdev->flags)) { in run()
3649 disk->replacement = rdev; in run()
3651 if (disk->rdev) in run()
3653 disk->rdev = rdev; in run()
3655 q = bdev_get_queue(rdev->bdev); in run()
3658 diff = (rdev->new_data_offset - rdev->data_offset); in run()
3667 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
3668 rdev->data_offset << 9); in run()
3672 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in run()
3709 if (!disk->rdev && disk->replacement) { in run()
3711 disk->rdev = disk->replacement; in run()
3713 clear_bit(Replacement, &disk->rdev->flags); in run()
3716 if (!disk->rdev || in run()
3717 !test_bit(In_sync, &disk->rdev->flags)) { in run()
3720 if (disk->rdev && in run()
3721 disk->rdev->saved_raid_disk < 0) in run()
3872 struct md_rdev *rdev; in raid10_takeover_raid0() local
3895 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
3896 if (rdev->raid_disk >= 0) { in raid10_takeover_raid0()
3897 rdev->new_raid_disk = rdev->raid_disk * 2; in raid10_takeover_raid0()
3898 rdev->sectors = size; in raid10_takeover_raid0()
4002 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded() local
4003 if (!rdev || test_bit(Faulty, &rdev->flags)) in calc_degraded()
4005 else if (!test_bit(In_sync, &rdev->flags)) in calc_degraded()
4018 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded() local
4019 if (!rdev || test_bit(Faulty, &rdev->flags)) in calc_degraded()
4021 else if (!test_bit(In_sync, &rdev->flags)) { in calc_degraded()
4054 struct md_rdev *rdev; in raid10_start_reshape() local
4069 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4070 if (!test_bit(In_sync, &rdev->flags) in raid10_start_reshape()
4071 && !test_bit(Faulty, &rdev->flags)) in raid10_start_reshape()
4073 if (rdev->raid_disk >= 0) { in raid10_start_reshape()
4074 long long diff = (rdev->new_data_offset in raid10_start_reshape()
4075 - rdev->data_offset); in raid10_start_reshape()
4128 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4129 if (rdev->raid_disk < 0 && in raid10_start_reshape()
4130 !test_bit(Faulty, &rdev->flags)) { in raid10_start_reshape()
4131 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4132 if (rdev->raid_disk >= in raid10_start_reshape()
4134 set_bit(In_sync, &rdev->flags); in raid10_start_reshape()
4136 rdev->recovery_offset = 0; in raid10_start_reshape()
4138 if (sysfs_link_rdev(mddev, rdev)) in raid10_start_reshape()
4141 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4142 && !test_bit(Faulty, &rdev->flags)) { in raid10_start_reshape()
4144 set_bit(In_sync, &rdev->flags); in raid10_start_reshape()
4180 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4181 rdev->new_data_offset = rdev->data_offset; in raid10_start_reshape()
4267 struct md_rdev *rdev; in reshape_request() local
4373 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4376 if (!rdev) { in reshape_request()
4388 read_bio->bi_bdev = rdev->bdev; in reshape_request()
4390 + rdev->data_offset); in reshape_request()
4415 rdev2 = conf->mirrors[d].rdev; in reshape_request()
4510 struct md_rdev *rdev; in reshape_request_write() local
4512 rdev = conf->mirrors[d].replacement; in reshape_request_write()
4515 rdev = conf->mirrors[d].rdev; in reshape_request_write()
4518 if (!rdev || test_bit(Faulty, &rdev->flags)) in reshape_request_write()
4520 atomic_inc(&rdev->nr_pending); in reshape_request_write()
4583 struct md_rdev *rdev = conf->mirrors[d].rdev; in handle_reshape_read_error() local
4585 if (rdev == NULL || in handle_reshape_read_error()
4586 test_bit(Faulty, &rdev->flags) || in handle_reshape_read_error()
4587 !test_bit(In_sync, &rdev->flags)) in handle_reshape_read_error()
4591 success = sync_page_io(rdev, in handle_reshape_read_error()
4626 struct md_rdev *rdev = NULL; in end_reshape_write() local
4630 rdev = conf->mirrors[d].replacement; in end_reshape_write()
4631 if (!rdev) { in end_reshape_write()
4633 rdev = conf->mirrors[d].rdev; in end_reshape_write()
4638 md_error(mddev, rdev); in end_reshape_write()
4641 rdev_dec_pending(rdev, mddev); in end_reshape_write()
4676 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_finish_reshape() local
4677 if (rdev) in raid10_finish_reshape()
4678 clear_bit(In_sync, &rdev->flags); in raid10_finish_reshape()
4679 rdev = conf->mirrors[d].replacement; in raid10_finish_reshape()
4680 if (rdev) in raid10_finish_reshape()
4681 clear_bit(In_sync, &rdev->flags); in raid10_finish_reshape()