Lines Matching refs:mddev (drivers/md/raid1.c)
122 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
134 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { in r1buf_pool_alloc()
193 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
201 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
207 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
218 struct mddev *mddev = r1_bio->mddev; in reschedule_retry() local
219 struct r1conf *conf = mddev->private; in reschedule_retry()
227 md_wakeup_thread(mddev->thread); in reschedule_retry()
239 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio()
290 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
302 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
320 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
337 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
346 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); in raid1_end_read_request()
355 mdname(conf->mddev), in raid1_end_read_request()
377 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
381 md_write_end(r1_bio->mddev); in close_write()
405 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
419 conf->mddev->recovery); in raid1_end_write_request()
485 conf->mddev); in raid1_end_write_request()
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
543 (mddev_is_clustered(conf->mddev) && in read_balance()
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, in read_balance()
695 rdev_dec_pending(rdev, conf->mddev); in read_balance()
711 static int raid1_mergeable_bvec(struct mddev *mddev, in raid1_mergeable_bvec() argument
715 struct r1conf *conf = mddev->private; in raid1_mergeable_bvec()
719 if (mddev->merge_check_needed) { in raid1_mergeable_bvec()
743 static int raid1_congested(struct mddev *mddev, int bits) in raid1_congested() argument
745 struct r1conf *conf = mddev->private; in raid1_congested()
787 bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
877 if ((conf->mddev->curr_resync_completed in need_to_wait_for_sync()
918 conf->mddev->curr_resync_completed) { in wait_barrier()
1045 struct mddev *mddev = plug->cb.data; in raid1_unplug() local
1046 struct r1conf *conf = mddev->private; in raid1_unplug()
1055 md_wakeup_thread(mddev->thread); in raid1_unplug()
1062 bitmap_unplug(mddev->bitmap); in raid1_unplug()
1079 static void make_request(struct mddev *mddev, struct bio * bio) in make_request() argument
1081 struct r1conf *conf = mddev->private; in make_request()
1108 md_write_start(mddev, bio); /* wait on superblock update early */ in make_request()
1111 ((bio_end_sector(bio) > mddev->suspend_lo && in make_request()
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || in make_request()
1113 (mddev_is_clustered(mddev) && in make_request()
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { in make_request()
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || in make_request()
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || in make_request()
1126 (mddev_is_clustered(mddev) && in make_request()
1127 !md_cluster_ops->area_resyncing(mddev, in make_request()
1137 bitmap = mddev->bitmap; in make_request()
1149 r1_bio->mddev = mddev; in make_request()
1190 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); in make_request()
1229 r1_bio->mddev = mddev; in make_request()
1242 md_wakeup_thread(mddev->thread); in make_request()
1302 rdev_dec_pending(rdev, mddev); in make_request()
1332 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in make_request()
1335 md_wait_for_blocked_rdev(blocked_rdev, mddev); in make_request()
1372 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); in make_request()
1382 < mddev->bitmap_info.max_write_behind) && in make_request()
1417 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); in make_request()
1432 md_wakeup_thread(mddev->thread); in make_request()
1446 r1_bio->mddev = mddev; in make_request()
1457 static void status(struct seq_file *seq, struct mddev *mddev) in status() argument
1459 struct r1conf *conf = mddev->private; in status()
1463 conf->raid_disks - mddev->degraded); in status()
1474 static void error(struct mddev *mddev, struct md_rdev *rdev) in error() argument
1477 struct r1conf *conf = mddev->private; in error()
1487 && (conf->raid_disks - mddev->degraded) == 1) { in error()
1494 conf->recovery_disabled = mddev->recovery_disabled; in error()
1500 mddev->degraded++; in error()
1508 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
1509 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
1513 mdname(mddev), bdevname(rdev->bdev, b), in error()
1514 mdname(mddev), conf->raid_disks - mddev->degraded); in error()
1526 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1559 static int raid1_spare_active(struct mddev *mddev) in raid1_spare_active() argument
1562 struct r1conf *conf = mddev->private; in raid1_spare_active()
1604 mddev->degraded -= count; in raid1_spare_active()
1611 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_add_disk() argument
1613 struct r1conf *conf = mddev->private; in raid1_add_disk()
1621 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1629 mddev->merge_check_needed = 1; in raid1_add_disk()
1636 if (mddev->gendisk) in raid1_add_disk()
1637 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_add_disk()
1676 md_integrity_add_rdev(rdev, mddev); in raid1_add_disk()
1677 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk()
1678 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid1_add_disk()
1683 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_remove_disk() argument
1685 struct r1conf *conf = mddev->private; in raid1_remove_disk()
1704 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1705 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1731 err = md_integrity_register(mddev); in raid1_remove_disk()
1761 struct mddev *mddev = r1_bio->mddev; in end_sync_write() local
1762 struct r1conf *conf = mddev->private; in end_sync_write()
1775 bitmap_end_sync(mddev->bitmap, s, in end_sync_write()
1785 mddev->recovery); in end_sync_write()
1805 md_done_sync(mddev, s, uptodate); in end_sync_write()
1821 rdev->mddev->recovery); in r1_sync_page_io()
1825 md_error(rdev->mddev, rdev); in r1_sync_page_io()
1842 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error() local
1843 struct r1conf *conf = mddev->private; in fix_sync_read_error()
1887 mdname(mddev), in fix_sync_read_error()
1899 mddev->recovery_disabled; in fix_sync_read_error()
1900 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in fix_sync_read_error()
1901 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
1925 rdev_dec_pending(rdev, mddev); in fix_sync_read_error()
1959 struct mddev *mddev = r1_bio->mddev; in process_checks() local
1960 struct r1conf *conf = mddev->private; in process_checks()
2003 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2031 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2032 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) in process_checks()
2036 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2044 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2046 struct r1conf *conf = mddev->private; in sync_request_write()
2058 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request_write()
2070 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) in sync_request_write()
2089 md_done_sync(mddev, s, 1); in sync_request_write()
2105 struct mddev *mddev = conf->mddev; in fix_read_error() local
2146 md_error(mddev, rdev); in fix_read_error()
2176 mdname(mddev), s, in fix_read_error()
2190 struct mddev *mddev = r1_bio->mddev; in narrow_write_error() local
2191 struct r1conf *conf = mddev->private; in narrow_write_error()
2236 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev); in narrow_write_error()
2241 wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); in narrow_write_error()
2281 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2285 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2297 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2304 md_error(conf->mddev, in handle_write_finished()
2310 conf->mddev); in handle_write_finished()
2321 struct mddev *mddev = conf->mddev; in handle_read_error() local
2335 if (mddev->ro == 0) { in handle_read_error()
2341 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); in handle_read_error()
2342 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); in handle_read_error()
2351 mdname(mddev), b, (unsigned long long)r1_bio->sector); in handle_read_error()
2358 mddev->ro ? IO_BLOCKED : NULL; in handle_read_error()
2362 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); in handle_read_error()
2370 mdname(mddev), in handle_read_error()
2399 r1_bio->mddev = mddev; in handle_read_error()
2411 struct mddev *mddev = thread->mddev; in raid1d() local
2414 struct r1conf *conf = mddev->private; in raid1d()
2418 md_check_recovery(mddev); in raid1d()
2435 mddev = r1_bio->mddev; in raid1d()
2436 conf = mddev->private; in raid1d()
2442 sync_request_write(mddev, r1_bio); in raid1d()
2455 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) in raid1d()
2456 md_check_recovery(mddev); in raid1d()
2485 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in sync_request() argument
2487 struct r1conf *conf = mddev->private; in sync_request()
2504 max_sector = mddev->dev_sectors; in sync_request()
2511 if (mddev->curr_resync < max_sector) /* aborted */ in sync_request()
2512 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
2517 bitmap_close_sync(mddev->bitmap); in sync_request()
2522 if (mddev->bitmap == NULL && in sync_request()
2523 mddev->recovery_cp == MaxSector && in sync_request()
2524 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
2532 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in sync_request()
2533 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in sync_request()
2539 bitmap_cond_end_sync(mddev->bitmap, sector_nr); in sync_request()
2554 r1_bio->mddev = mddev; in sync_request()
2601 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in sync_request()
2602 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in sync_request()
2638 set_bit(MD_CHANGE_DEVS, &mddev->flags); in sync_request()
2648 conf->recovery_disabled = mddev->recovery_disabled; in sync_request()
2649 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in sync_request()
2661 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) in sync_request()
2678 if (max_sector > mddev->resync_max) in sync_request()
2679 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in sync_request()
2692 if (!bitmap_start_sync(mddev->bitmap, sector_nr, in sync_request()
2695 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request()
2733 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in sync_request()
2753 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid1_size() argument
2758 return mddev->dev_sectors; in raid1_size()
2761 static struct r1conf *setup_conf(struct mddev *mddev) in setup_conf() argument
2774 * mddev->raid_disks * 2, in setup_conf()
2786 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2793 conf->poolinfo->mddev = mddev; in setup_conf()
2797 rdev_for_each(rdev, mddev) { in setup_conf()
2800 if (disk_idx >= mddev->raid_disks in setup_conf()
2804 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
2813 mddev->merge_check_needed = 1; in setup_conf()
2818 conf->raid_disks = mddev->raid_disks; in setup_conf()
2819 conf->mddev = mddev; in setup_conf()
2827 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
2862 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
2866 mdname(mddev)); in setup_conf()
2884 static void raid1_free(struct mddev *mddev, void *priv);
2885 static int run(struct mddev *mddev) in run() argument
2893 if (mddev->level != 1) { in run()
2895 mdname(mddev), mddev->level); in run()
2898 if (mddev->reshape_position != MaxSector) { in run()
2900 mdname(mddev)); in run()
2908 if (mddev->private == NULL) in run()
2909 conf = setup_conf(mddev); in run()
2911 conf = mddev->private; in run()
2916 if (mddev->queue) in run()
2917 blk_queue_max_write_same_sectors(mddev->queue, 0); in run()
2919 rdev_for_each(rdev, mddev) { in run()
2920 if (!mddev->gendisk) in run()
2922 disk_stack_limits(mddev->gendisk, rdev->bdev, in run()
2928 mddev->degraded = 0; in run()
2933 mddev->degraded++; in run()
2935 if (conf->raid_disks - mddev->degraded == 1) in run()
2936 mddev->recovery_cp = MaxSector; in run()
2938 if (mddev->recovery_cp != MaxSector) in run()
2941 mdname(mddev)); in run()
2944 mdname(mddev), mddev->raid_disks - mddev->degraded, in run()
2945 mddev->raid_disks); in run()
2950 mddev->thread = conf->thread; in run()
2952 mddev->private = conf; in run()
2954 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); in run()
2956 if (mddev->queue) { in run()
2959 mddev->queue); in run()
2962 mddev->queue); in run()
2965 ret = md_integrity_register(mddev); in run()
2967 md_unregister_thread(&mddev->thread); in run()
2968 raid1_free(mddev, conf); in run()
2973 static void raid1_free(struct mddev *mddev, void *priv) in raid1_free() argument
2985 static int raid1_resize(struct mddev *mddev, sector_t sectors) in raid1_resize() argument
2994 sector_t newsize = raid1_size(mddev, sectors, 0); in raid1_resize()
2995 if (mddev->external_size && in raid1_resize()
2996 mddev->array_sectors > newsize) in raid1_resize()
2998 if (mddev->bitmap) { in raid1_resize()
2999 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid1_resize()
3003 md_set_array_sectors(mddev, newsize); in raid1_resize()
3004 set_capacity(mddev->gendisk, mddev->array_sectors); in raid1_resize()
3005 revalidate_disk(mddev->gendisk); in raid1_resize()
3006 if (sectors > mddev->dev_sectors && in raid1_resize()
3007 mddev->recovery_cp > mddev->dev_sectors) { in raid1_resize()
3008 mddev->recovery_cp = mddev->dev_sectors; in raid1_resize()
3009 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_resize()
3011 mddev->dev_sectors = sectors; in raid1_resize()
3012 mddev->resync_max_sectors = sectors; in raid1_resize()
3016 static int raid1_reshape(struct mddev *mddev) in raid1_reshape() argument
3032 struct r1conf *conf = mddev->private; in raid1_reshape()
3038 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3039 mddev->layout != mddev->new_layout || in raid1_reshape()
3040 mddev->level != mddev->new_level) { in raid1_reshape()
3041 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
3042 mddev->new_layout = mddev->layout; in raid1_reshape()
3043 mddev->new_level = mddev->level; in raid1_reshape()
3047 err = md_allow_write(mddev); in raid1_reshape()
3051 raid_disks = mddev->raid_disks + mddev->delta_disks; in raid1_reshape()
3065 newpoolinfo->mddev = mddev; in raid1_reshape()
3091 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3093 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3094 if (sysfs_link_rdev(mddev, rdev)) in raid1_reshape()
3097 mdname(mddev), rdev->raid_disk); in raid1_reshape()
3108 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3110 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3111 mddev->delta_disks = 0; in raid1_reshape()
3115 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_reshape()
3116 md_wakeup_thread(mddev->thread); in raid1_reshape()
3122 static void raid1_quiesce(struct mddev *mddev, int state) in raid1_quiesce() argument
3124 struct r1conf *conf = mddev->private; in raid1_quiesce()
3139 static void *raid1_takeover(struct mddev *mddev) in raid1_takeover() argument
3144 if (mddev->level == 5 && mddev->raid_disks == 2) { in raid1_takeover()
3146 mddev->new_level = 1; in raid1_takeover()
3147 mddev->new_layout = 0; in raid1_takeover()
3148 mddev->new_chunk_sectors = 0; in raid1_takeover()
3149 conf = setup_conf(mddev); in raid1_takeover()