Lines matching refs:mddev in drivers/md/md.c
79 static int remove_and_add_spares(struct mddev *mddev,
81 static void mddev_detach(struct mddev *mddev);
104 static inline int speed_min(struct mddev *mddev) in speed_min() argument
106 return mddev->sync_speed_min ? in speed_min()
107 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
110 static inline int speed_max(struct mddev *mddev) in speed_max() argument
112 return mddev->sync_speed_max ? in speed_max()
113 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
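speed_min() and speed_max() above treat a per-array value of 0 as "inherit the system-wide sysctl limit". A minimal user-space sketch of the same fallback; array_limits and effective_speed_min/max are illustrative names, not kernel identifiers:

static int sysctl_speed_limit_min = 1000;       /* system-wide defaults (KB/s) */
static int sysctl_speed_limit_max = 200000;

struct array_limits {
        int sync_speed_min;     /* 0 means "use the system-wide value" */
        int sync_speed_max;
};

static int effective_speed_min(const struct array_limits *a)
{
        return a->sync_speed_min ? a->sync_speed_min : sysctl_speed_limit_min;
}

static int effective_speed_max(const struct array_limits *a)
{
        return a->sync_speed_max ? a->sync_speed_max : sysctl_speed_limit_max;
}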
165 struct mddev *mddev) in bio_alloc_mddev() argument
169 if (!mddev || !mddev->bio_set) in bio_alloc_mddev()
172 b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set); in bio_alloc_mddev()
180 struct mddev *mddev) in bio_clone_mddev() argument
182 if (!mddev || !mddev->bio_set) in bio_clone_mddev()
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set); in bio_clone_mddev()
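bio_alloc_mddev() and bio_clone_mddev() allocate from the array's private bio_set when one exists; the fallback branch is not visible in the listing. A hedged sketch of the allocation path, assuming kernel context and that the fallback is the generic bio_alloc():

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev)
{
        if (!mddev || !mddev->bio_set)
                return bio_alloc(gfp_mask, nr_iovecs);  /* assumed fallback */

        return bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
}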
201 void md_new_event(struct mddev *mddev) in md_new_event() argument
211 static void md_new_event_inintr(struct mddev *mddev) in md_new_event_inintr() argument
237 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
240 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
256 struct mddev *mddev = q->queuedata; in md_make_request() local
260 if (mddev == NULL || mddev->pers == NULL in md_make_request()
261 || !mddev->ready) { in md_make_request()
265 if (mddev->ro == 1 && unlikely(rw == WRITE)) { in md_make_request()
271 if (mddev->suspended) { in md_make_request()
274 prepare_to_wait(&mddev->sb_wait, &__wait, in md_make_request()
276 if (!mddev->suspended) in md_make_request()
282 finish_wait(&mddev->sb_wait, &__wait); in md_make_request()
284 atomic_inc(&mddev->active_io); in md_make_request()
294 mddev->pers->make_request(mddev, bio); in md_make_request()
297 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); in md_make_request()
298 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); in md_make_request()
301 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) in md_make_request()
302 wake_up(&mddev->sb_wait); in md_make_request()
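The md_make_request() lines above show how regular I/O is gated on suspension: a submitter that finds mddev->suspended set sleeps on sb_wait, and every request holds a reference in active_io while the personality processes it. A condensed sketch of that gate, assuming kernel context and the fields shown; the function name is illustrative and RCU handling is omitted:

static void md_gate_and_submit(struct mddev *mddev, struct bio *bio)
{
        if (mddev->suspended) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!mddev->suspended)
                                break;
                        schedule();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }

        atomic_inc(&mddev->active_io);          /* request now "in flight" */
        mddev->pers->make_request(mddev, bio);

        /* the last in-flight request wakes a waiting mddev_suspend() */
        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}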
311 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
313 BUG_ON(mddev->suspended); in mddev_suspend()
314 mddev->suspended = 1; in mddev_suspend()
316 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); in mddev_suspend()
317 mddev->pers->quiesce(mddev, 1); in mddev_suspend()
319 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
323 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
325 mddev->suspended = 0; in mddev_resume()
326 wake_up(&mddev->sb_wait); in mddev_resume()
327 mddev->pers->quiesce(mddev, 0); in mddev_resume()
329 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
330 md_wakeup_thread(mddev->thread); in mddev_resume()
331 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
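mddev_suspend() and mddev_resume() are the other half of that gate: suspend sets the flag, waits for active_io to drain, and quiesces the personality; resume reverses it and kicks the md threads. A sketch of the pairing a caller is expected to use, as level_store() does further down; do_reconfiguration() is a hypothetical placeholder:

static void reconfigure_array(struct mddev *mddev)
{
        mddev_suspend(mddev);           /* blocks until all active_io completes */
        do_reconfiguration(mddev);      /* personality is quiesced here */
        mddev_resume(mddev);            /* wakes writers, wakes md/sync threads */
}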
335 int mddev_congested(struct mddev *mddev, int bits) in mddev_congested() argument
337 struct md_personality *pers = mddev->pers; in mddev_congested()
341 if (mddev->suspended) in mddev_congested()
344 ret = pers->congested(mddev, bits); in mddev_congested()
351 struct mddev *mddev = data; in md_congested() local
352 return mddev_congested(mddev, bits); in md_congested()
359 struct mddev *mddev = q->queuedata; in md_mergeable_bvec() local
362 if (mddev->suspended) { in md_mergeable_bvec()
369 struct md_personality *pers = mddev->pers; in md_mergeable_bvec()
371 ret = pers->mergeable_bvec(mddev, bvm, biovec); in md_mergeable_bvec()
385 struct mddev *mddev = rdev->mddev; in md_end_flush() local
387 rdev_dec_pending(rdev, mddev); in md_end_flush()
389 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
391 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
400 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
403 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
404 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
406 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
417 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); in submit_flushes()
421 atomic_inc(&mddev->flush_pending); in submit_flushes()
424 rdev_dec_pending(rdev, mddev); in submit_flushes()
427 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
428 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
433 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
434 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
441 mddev->pers->make_request(mddev, bio); in md_submit_flush_data()
444 mddev->flush_bio = NULL; in md_submit_flush_data()
445 wake_up(&mddev->sb_wait); in md_submit_flush_data()
448 void md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
450 spin_lock_irq(&mddev->lock); in md_flush_request()
451 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
452 !mddev->flush_bio, in md_flush_request()
453 mddev->lock); in md_flush_request()
454 mddev->flush_bio = bio; in md_flush_request()
455 spin_unlock_irq(&mddev->lock); in md_flush_request()
457 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
458 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
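The flush path (md_end_flush, submit_flushes, md_submit_flush_data, md_flush_request) fans a flush out to every member device using a biased counter: flush_pending starts at 1, gains one per per-device flush bio, and whoever drops it to zero schedules md_submit_flush_data(), which finally passes the original bio to the personality. A sketch of the counting pattern, assuming kernel context; per-device bio construction and rdev refcounting are elided:

static void flush_fan_out(struct mddev *mddev)
{
        struct md_rdev *rdev;

        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);           /* bias */

        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                /* ... allocate a zero-length flush bio for rdev with
                 *     bio_alloc_mddev(GFP_NOIO, 0, mddev), set its end_io
                 *     to md_end_flush(), and submit it ... */
                atomic_inc(&mddev->flush_pending);
        }
        rcu_read_unlock();

        /* drop the bias; if every device flush already finished (or none
         * were issued) this is the final decrement */
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}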
464 struct mddev *mddev = cb->data; in md_unplug() local
465 md_wakeup_thread(mddev->thread); in md_unplug()
470 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
472 atomic_inc(&mddev->active); in mddev_get()
473 return mddev; in mddev_get()
478 static void mddev_put(struct mddev *mddev) in mddev_put() argument
482 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
484 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
485 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
488 list_del_init(&mddev->all_mddevs); in mddev_put()
489 bs = mddev->bio_set; in mddev_put()
490 mddev->bio_set = NULL; in mddev_put()
491 if (mddev->gendisk) { in mddev_put()
497 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
498 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
500 kfree(mddev); in mddev_put()
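mddev_get()/mddev_put() pair a plain refcount with the global all_mddevs list: atomic_dec_and_lock() makes the final decrement take all_mddevs_lock, so the unlink in mddev_put() cannot race with a lookup in mddev_find(). The generic shape of that pattern, assuming kernel context; the names here are illustrative, not md's:

struct obj {
        atomic_t                active;
        struct list_head        node;
};

static void obj_put(struct obj *o, spinlock_t *list_lock)
{
        /* returns nonzero only when it decremented to zero, lock held */
        if (!atomic_dec_and_lock(&o->active, list_lock))
                return;                         /* not the last reference */

        list_del_init(&o->node);                /* safe: lookups take list_lock */
        spin_unlock(list_lock);
        kfree(o);                               /* md may instead defer to a workqueue */
}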
507 void mddev_init(struct mddev *mddev) in mddev_init() argument
509 mutex_init(&mddev->open_mutex); in mddev_init()
510 mutex_init(&mddev->reconfig_mutex); in mddev_init()
511 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
512 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
513 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
514 init_timer(&mddev->safemode_timer); in mddev_init()
515 atomic_set(&mddev->active, 1); in mddev_init()
516 atomic_set(&mddev->openers, 0); in mddev_init()
517 atomic_set(&mddev->active_io, 0); in mddev_init()
518 spin_lock_init(&mddev->lock); in mddev_init()
519 atomic_set(&mddev->flush_pending, 0); in mddev_init()
520 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
521 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
522 mddev->reshape_position = MaxSector; in mddev_init()
523 mddev->reshape_backwards = 0; in mddev_init()
524 mddev->last_sync_action = "none"; in mddev_init()
525 mddev->resync_min = 0; in mddev_init()
526 mddev->resync_max = MaxSector; in mddev_init()
527 mddev->level = LEVEL_NONE; in mddev_init()
531 static struct mddev *mddev_find(dev_t unit) in mddev_find()
533 struct mddev *mddev, *new = NULL; in mddev_find() local
542 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find()
543 if (mddev->unit == unit) { in mddev_find()
544 mddev_get(mddev); in mddev_find()
547 return mddev; in mddev_find()
575 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find()
576 if (mddev->unit == dev) { in mddev_find()
607 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
609 if (mddev->to_remove) { in mddev_unlock()
622 struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
623 mddev->to_remove = NULL; in mddev_unlock()
624 mddev->sysfs_active = 1; in mddev_unlock()
625 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
627 if (mddev->kobj.sd) { in mddev_unlock()
629 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
630 if (mddev->pers == NULL || in mddev_unlock()
631 mddev->pers->sync_request == NULL) { in mddev_unlock()
632 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
633 if (mddev->sysfs_action) in mddev_unlock()
634 sysfs_put(mddev->sysfs_action); in mddev_unlock()
635 mddev->sysfs_action = NULL; in mddev_unlock()
638 mddev->sysfs_active = 0; in mddev_unlock()
640 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
646 md_wakeup_thread(mddev->thread); in mddev_unlock()
651 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
655 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
663 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
667 rdev_for_each(rdev, mddev) in find_rdev()
674 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev) in find_rdev_rcu() argument
678 rdev_for_each_rcu(rdev, mddev) in find_rdev_rcu()
736 struct mddev *mddev = rdev->mddev; in super_written() local
742 md_error(mddev, rdev); in super_written()
745 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
746 wake_up(&mddev->sb_wait); in super_written()
750 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
759 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); in md_super_write()
767 atomic_inc(&mddev->pending_writes); in md_super_write()
771 void md_super_wait(struct mddev *mddev) in md_super_wait() argument
774 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
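Superblock writes are tracked much like flushes: md_super_write() bumps pending_writes (and super_written() above drops it and wakes sb_wait), so md_super_wait() can sleep until every submitted superblock write has completed. An illustrative caller, assuming the prototypes shown; the final page argument is assumed to be rdev->sb_page, which the truncated lines do not show:

static void write_all_supers(struct mddev *mddev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                md_super_write(mddev, rdev, rdev->sb_start,
                               rdev->sb_size, rdev->sb_page);

        md_super_wait(mddev);   /* sleeps until pending_writes reaches 0 */
}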
780 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); in sync_page_io()
787 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
788 (rdev->mddev->reshape_backwards == in sync_page_io()
789 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
930 int (*validate_super)(struct mddev *mddev,
932 void (*sync_super)(struct mddev *mddev,
948 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
950 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
953 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1060 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1072 if (mddev->raid_disks == 0) { in super_90_validate()
1073 mddev->major_version = 0; in super_90_validate()
1074 mddev->minor_version = sb->minor_version; in super_90_validate()
1075 mddev->patch_version = sb->patch_version; in super_90_validate()
1076 mddev->external = 0; in super_90_validate()
1077 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1078 mddev->ctime = sb->ctime; in super_90_validate()
1079 mddev->utime = sb->utime; in super_90_validate()
1080 mddev->level = sb->level; in super_90_validate()
1081 mddev->clevel[0] = 0; in super_90_validate()
1082 mddev->layout = sb->layout; in super_90_validate()
1083 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1084 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1085 mddev->events = ev1; in super_90_validate()
1086 mddev->bitmap_info.offset = 0; in super_90_validate()
1087 mddev->bitmap_info.space = 0; in super_90_validate()
1089 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1090 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1091 mddev->reshape_backwards = 0; in super_90_validate()
1093 if (mddev->minor_version >= 91) { in super_90_validate()
1094 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1095 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1096 mddev->new_level = sb->new_level; in super_90_validate()
1097 mddev->new_layout = sb->new_layout; in super_90_validate()
1098 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1099 if (mddev->delta_disks < 0) in super_90_validate()
1100 mddev->reshape_backwards = 1; in super_90_validate()
1102 mddev->reshape_position = MaxSector; in super_90_validate()
1103 mddev->delta_disks = 0; in super_90_validate()
1104 mddev->new_level = mddev->level; in super_90_validate()
1105 mddev->new_layout = mddev->layout; in super_90_validate()
1106 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1110 mddev->recovery_cp = MaxSector; in super_90_validate()
1114 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1116 mddev->recovery_cp = 0; in super_90_validate()
1119 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1120 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1121 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1122 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1124 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1127 mddev->bitmap_info.file == NULL) { in super_90_validate()
1128 mddev->bitmap_info.offset = in super_90_validate()
1129 mddev->bitmap_info.default_offset; in super_90_validate()
1130 mddev->bitmap_info.space = in super_90_validate()
1131 mddev->bitmap_info.default_space; in super_90_validate()
1134 } else if (mddev->pers == NULL) { in super_90_validate()
1140 if (ev1 < mddev->events) in super_90_validate()
1142 } else if (mddev->bitmap) { in super_90_validate()
1146 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1148 if (ev1 < mddev->events) in super_90_validate()
1151 if (ev1 < mddev->events) in super_90_validate()
1156 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1170 if (mddev->minor_version >= 91) { in super_90_validate()
1185 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1189 int next_spare = mddev->raid_disks; in super_90_sync()
1211 sb->major_version = mddev->major_version; in super_90_sync()
1212 sb->patch_version = mddev->patch_version; in super_90_sync()
1214 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1215 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1216 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1217 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1219 sb->ctime = mddev->ctime; in super_90_sync()
1220 sb->level = mddev->level; in super_90_sync()
1221 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1222 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1223 sb->md_minor = mddev->md_minor; in super_90_sync()
1225 sb->utime = mddev->utime; in super_90_sync()
1227 sb->events_hi = (mddev->events>>32); in super_90_sync()
1228 sb->events_lo = (u32)mddev->events; in super_90_sync()
1230 if (mddev->reshape_position == MaxSector) in super_90_sync()
1234 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1235 sb->new_level = mddev->new_level; in super_90_sync()
1236 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1237 sb->new_layout = mddev->new_layout; in super_90_sync()
1238 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1240 mddev->minor_version = sb->minor_version; in super_90_sync()
1241 if (mddev->in_sync) in super_90_sync()
1243 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1244 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1245 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1246 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1251 sb->layout = mddev->layout; in super_90_sync()
1252 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1254 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1258 rdev_for_each(rdev2, mddev) { in super_90_sync()
1304 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1330 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1332 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1340 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1342 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1344 md_super_wait(rdev->mddev); in super_90_rdev_size_change()
1549 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1560 if (mddev->raid_disks == 0) { in super_1_validate()
1561 mddev->major_version = 1; in super_1_validate()
1562 mddev->patch_version = 0; in super_1_validate()
1563 mddev->external = 0; in super_1_validate()
1564 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1565 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); in super_1_validate()
1566 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); in super_1_validate()
1567 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1568 mddev->clevel[0] = 0; in super_1_validate()
1569 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1570 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1571 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1572 mddev->events = ev1; in super_1_validate()
1573 mddev->bitmap_info.offset = 0; in super_1_validate()
1574 mddev->bitmap_info.space = 0; in super_1_validate()
1578 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1579 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1580 mddev->reshape_backwards = 0; in super_1_validate()
1582 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1583 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1585 mddev->max_disks = (4096-256)/2; in super_1_validate()
1588 mddev->bitmap_info.file == NULL) { in super_1_validate()
1589 mddev->bitmap_info.offset = in super_1_validate()
1596 if (mddev->minor_version > 0) in super_1_validate()
1597 mddev->bitmap_info.space = 0; in super_1_validate()
1598 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1599 mddev->bitmap_info.space = in super_1_validate()
1600 8 - mddev->bitmap_info.offset; in super_1_validate()
1602 mddev->bitmap_info.space = in super_1_validate()
1603 -mddev->bitmap_info.offset; in super_1_validate()
1607 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1608 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1609 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1610 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1611 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1612 if (mddev->delta_disks < 0 || in super_1_validate()
1613 (mddev->delta_disks == 0 && in super_1_validate()
1616 mddev->reshape_backwards = 1; in super_1_validate()
1618 mddev->reshape_position = MaxSector; in super_1_validate()
1619 mddev->delta_disks = 0; in super_1_validate()
1620 mddev->new_level = mddev->level; in super_1_validate()
1621 mddev->new_layout = mddev->layout; in super_1_validate()
1622 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1625 } else if (mddev->pers == NULL) { in super_1_validate()
1632 if (ev1 < mddev->events) in super_1_validate()
1634 } else if (mddev->bitmap) { in super_1_validate()
1638 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1640 if (ev1 < mddev->events) in super_1_validate()
1643 if (ev1 < mddev->events) in super_1_validate()
1647 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1684 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1698 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
1699 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
1700 if (mddev->in_sync) in super_1_sync()
1701 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
1707 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
1708 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
1709 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
1710 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
1711 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
1720 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
1721 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
1731 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
1739 if (mddev->reshape_position != MaxSector) { in super_1_sync()
1741 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
1742 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
1743 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
1744 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
1745 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
1746 if (mddev->delta_disks == 0 && in super_1_sync()
1747 mddev->reshape_backwards) in super_1_sync()
1762 md_error(mddev, rdev); in super_1_sync()
1793 rdev_for_each(rdev2, mddev) in super_1_sync()
1810 rdev_for_each(rdev2, mddev) { in super_1_sync()
1830 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
1840 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
1857 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
1859 md_super_wait(rdev->mddev); in super_1_rdev_size_change()
1875 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
1886 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
1887 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
1888 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
1918 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
1920 if (mddev->sync_super) { in sync_super()
1921 mddev->sync_super(mddev, rdev); in sync_super()
1925 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
1927 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
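sync_super() dispatches metadata updates: a per-array override in mddev->sync_super wins, otherwise the handler comes from the super_types[] table indexed by mddev->major_version (super_90_sync and super_1_sync above are the v0.90 and v1.x entries). A minimal model of that dispatch; the real table has more members than shown here:

struct super_type_model {
        int  (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};

static struct super_type_model super_types_model[] = {
        [0] = { .validate_super = super_90_validate, .sync_super = super_90_sync },
        [1] = { .validate_super = super_1_validate,  .sync_super = super_1_sync  },
};

static void sync_super_model(struct mddev *mddev, struct md_rdev *rdev)
{
        if (mddev->sync_super) {
                mddev->sync_super(mddev, rdev);
                return;
        }
        BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types_model));
        super_types_model[mddev->major_version].sync_super(mddev, rdev);
}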
1930 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
1955 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
1959 if (list_empty(&mddev->disks)) in md_integrity_register()
1961 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
1963 rdev_for_each(rdev, mddev) { in md_integrity_register()
1985 if (blk_integrity_register(mddev->gendisk, in md_integrity_register()
1988 mdname(mddev)); in md_integrity_register()
1991 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
1992 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { in md_integrity_register()
1994 mdname(mddev)); in md_integrity_register()
2002 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2007 if (!mddev->gendisk) in md_integrity_add_rdev()
2011 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2017 if (bi_rdev && blk_integrity_compare(mddev->gendisk, in md_integrity_add_rdev()
2020 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); in md_integrity_add_rdev()
2021 blk_integrity_unregister(mddev->gendisk); in md_integrity_add_rdev()
2025 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2033 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2037 if (rdev->sectors && (mddev->dev_sectors == 0 || in bind_rdev_to_array()
2038 rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2039 if (mddev->pers) { in bind_rdev_to_array()
2044 if (mddev->level > 0) in bind_rdev_to_array()
2047 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2057 if (mddev->pers) in bind_rdev_to_array()
2058 choice = mddev->raid_disks; in bind_rdev_to_array()
2059 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2063 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2069 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2071 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2078 rdev->mddev = mddev; in bind_rdev_to_array()
2081 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2089 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2090 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2093 mddev->recovery_disabled++; in bind_rdev_to_array()
2099 b, mdname(mddev)); in bind_rdev_to_array()
2114 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2117 rdev->mddev = NULL; in unbind_rdev_from_array()
2185 static void export_array(struct mddev *mddev) in export_array() argument
2189 while (!list_empty(&mddev->disks)) { in export_array()
2190 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2194 mddev->raid_disks = 0; in export_array()
2195 mddev->major_version = 0; in export_array()
2198 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2207 rdev_for_each(rdev, mddev) { in sync_sbs()
2208 if (rdev->sb_events == mddev->events || in sync_sbs()
2211 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2215 sync_super(mddev, rdev); in sync_sbs()
2221 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2228 if (mddev->ro) { in md_update_sb()
2230 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_update_sb()
2235 rdev_for_each(rdev, mddev) { in md_update_sb()
2237 mddev->delta_disks >= 0 && in md_update_sb()
2239 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2240 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2243 if (!mddev->persistent) { in md_update_sb()
2244 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_update_sb()
2245 clear_bit(MD_CHANGE_DEVS, &mddev->flags); in md_update_sb()
2246 if (!mddev->external) { in md_update_sb()
2247 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_update_sb()
2248 rdev_for_each(rdev, mddev) { in md_update_sb()
2252 md_error(mddev, rdev); in md_update_sb()
2259 wake_up(&mddev->sb_wait); in md_update_sb()
2263 spin_lock(&mddev->lock); in md_update_sb()
2265 mddev->utime = get_seconds(); in md_update_sb()
2267 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) in md_update_sb()
2269 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) in md_update_sb()
2277 if (mddev->degraded) in md_update_sb()
2289 sync_req = mddev->in_sync; in md_update_sb()
2294 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2295 && mddev->can_decrease_events in md_update_sb()
2296 && mddev->events != 1) { in md_update_sb()
2297 mddev->events--; in md_update_sb()
2298 mddev->can_decrease_events = 0; in md_update_sb()
2301 mddev->events ++; in md_update_sb()
2302 mddev->can_decrease_events = nospares; in md_update_sb()
2310 WARN_ON(mddev->events == 0); in md_update_sb()
2312 rdev_for_each(rdev, mddev) { in md_update_sb()
2319 sync_sbs(mddev, nospares); in md_update_sb()
2320 spin_unlock(&mddev->lock); in md_update_sb()
2323 mdname(mddev), mddev->in_sync); in md_update_sb()
2325 bitmap_update_sb(mddev->bitmap); in md_update_sb()
2326 rdev_for_each(rdev, mddev) { in md_update_sb()
2333 md_super_write(mddev,rdev, in md_update_sb()
2339 rdev->sb_events = mddev->events; in md_update_sb()
2341 md_super_write(mddev, rdev, in md_update_sb()
2352 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2356 md_super_wait(mddev); in md_update_sb()
2359 spin_lock(&mddev->lock); in md_update_sb()
2360 if (mddev->in_sync != sync_req || in md_update_sb()
2361 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { in md_update_sb()
2363 spin_unlock(&mddev->lock); in md_update_sb()
2366 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in md_update_sb()
2367 spin_unlock(&mddev->lock); in md_update_sb()
2368 wake_up(&mddev->sb_wait); in md_update_sb()
2369 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2370 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_update_sb()
2372 rdev_for_each(rdev, mddev) { in md_update_sb()
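md_update_sb() is the central superblock writer: it bumps mddev->events (or, on a clean "nospares" transition, undoes the previous bump so spares need not be rewritten), runs sync_sbs() under mddev->lock, then writes each device and waits with md_super_wait(). A sketch of just the event-counter decision, using the conditions visible above:

static void bump_events(struct mddev *mddev, int nospares)
{
        if (nospares
            && (mddev->in_sync && mddev->recovery_cp == MaxSector)
            && mddev->can_decrease_events
            && mddev->events != 1) {
                mddev->events--;                /* roll back the previous bump */
                mddev->can_decrease_events = 0;
        } else {
                mddev->events++;
                mddev->can_decrease_events = nospares;
        }
        WARN_ON(mddev->events == 0);
}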
2386 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2389 if (!mddev->pers->hot_remove_disk) { in add_bound_rdev()
2394 super_types[mddev->major_version]. in add_bound_rdev()
2395 validate_super(mddev, rdev); in add_bound_rdev()
2396 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2405 set_bit(MD_CHANGE_DEVS, &mddev->flags); in add_bound_rdev()
2406 if (mddev->degraded) in add_bound_rdev()
2407 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2408 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2409 md_new_event(mddev); in add_bound_rdev()
2410 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2504 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2505 md_error(rdev->mddev, rdev); in state_store()
2514 struct mddev *mddev = rdev->mddev; in state_store() local
2515 if (mddev_is_clustered(mddev)) in state_store()
2516 md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2518 if (mddev_is_clustered(mddev)) in state_store()
2519 md_cluster_ops->metadata_update_start(mddev); in state_store()
2520 if (mddev->pers) in state_store()
2521 md_update_sb(mddev, 1); in state_store()
2522 md_new_event(mddev); in state_store()
2523 if (mddev_is_clustered(mddev)) in state_store()
2524 md_cluster_ops->metadata_update_finish(mddev); in state_store()
2542 md_error(rdev->mddev, rdev); in state_store()
2547 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2548 md_wakeup_thread(rdev->mddev->thread); in state_store()
2555 if (rdev->mddev->pers == NULL) { in state_store()
2575 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2576 md_wakeup_thread(rdev->mddev->thread); in state_store()
2589 if (rdev->mddev->pers) in state_store()
2597 if (rdev->mddev->pers) in state_store()
2611 if (!mddev_is_clustered(rdev->mddev) || in state_store()
2665 if (rdev->mddev->pers && slot == -1) { in slot_store()
2676 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
2679 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
2682 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
2683 md_wakeup_thread(rdev->mddev->thread); in slot_store()
2684 } else if (rdev->mddev->pers) { in slot_store()
2692 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
2695 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
2698 if (slot >= rdev->mddev->raid_disks && in slot_store()
2699 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2709 err = rdev->mddev->pers-> in slot_store()
2710 hot_add_disk(rdev->mddev, rdev); in slot_store()
2716 if (sysfs_link_rdev(rdev->mddev, rdev)) in slot_store()
2720 if (slot >= rdev->mddev->raid_disks && in slot_store()
2721 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
2748 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
2750 if (rdev->sectors && rdev->mddev->external) in offset_store()
2772 struct mddev *mddev = rdev->mddev; in new_offset_store() local
2777 if (mddev->sync_thread || in new_offset_store()
2778 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
2786 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
2795 mddev->reshape_backwards) in new_offset_store()
2802 !mddev->reshape_backwards) in new_offset_store()
2805 if (mddev->pers && mddev->persistent && in new_offset_store()
2806 !super_types[mddev->major_version] in new_offset_store()
2811 mddev->reshape_backwards = 1; in new_offset_store()
2813 mddev->reshape_backwards = 0; in new_offset_store()
2858 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
2890 struct mddev *mddev; in rdev_size_store() local
2895 for_each_mddev(mddev, tmp) { in rdev_size_store()
2898 rdev_for_each(rdev2, mddev) in rdev_size_store()
2908 mddev_put(mddev); in rdev_size_store()
2950 if (rdev->mddev->pers && in recovery_start_store()
3016 if (!rdev->mddev) in rdev_attr_show()
3028 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3034 rv = mddev ? mddev_lock(mddev): -EBUSY; in rdev_attr_store()
3036 if (rdev->mddev == NULL) in rdev_attr_store()
3040 mddev_unlock(mddev); in rdev_attr_store()
3172 static void analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3179 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3180 switch (super_types[mddev->major_version]. in analyze_sbs()
3181 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3195 super_types[mddev->major_version]. in analyze_sbs()
3196 validate_super(mddev, freshest); in analyze_sbs()
3199 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3200 if (mddev->max_disks && in analyze_sbs()
3201 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3202 i > mddev->max_disks)) { in analyze_sbs()
3205 mdname(mddev), bdevname(rdev->bdev, b), in analyze_sbs()
3206 mddev->max_disks); in analyze_sbs()
3211 if (super_types[mddev->major_version]. in analyze_sbs()
3212 validate_super(mddev, rdev)) { in analyze_sbs()
3228 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3232 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { in analyze_sbs()
3282 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3284 int msec = (mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3288 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3295 mddev->safemode_delay = 0; in safe_delay_store()
3297 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3302 mddev->safemode_delay = new_delay; in safe_delay_store()
3304 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
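safemode_delay is stored in jiffies but exposed in milliseconds: safe_delay_show() converts with msec = delay*1000/HZ, and the store path does the inverse before re-arming safemode_timer. A sketch of the inverse conversion; the round-up of a tiny non-zero request to one jiffy is an assumption, not visible in the listing:

static unsigned long safemode_msec_to_jiffies(unsigned long msec)
{
        unsigned long delay = (msec * HZ) / 1000;

        if (msec && !delay)
                delay = 1;      /* assumed: never round a non-zero request to "off" */
        return delay;
}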
3312 level_show(struct mddev *mddev, char *page) in level_show() argument
3316 spin_lock(&mddev->lock); in level_show()
3317 p = mddev->pers; in level_show()
3320 else if (mddev->clevel[0]) in level_show()
3321 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3322 else if (mddev->level != LEVEL_NONE) in level_show()
3323 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3326 spin_unlock(&mddev->lock); in level_show()
3331 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3344 rv = mddev_lock(mddev); in level_store()
3348 if (mddev->pers == NULL) { in level_store()
3349 strncpy(mddev->clevel, buf, slen); in level_store()
3350 if (mddev->clevel[slen-1] == '\n') in level_store()
3352 mddev->clevel[slen] = 0; in level_store()
3353 mddev->level = LEVEL_NONE; in level_store()
3358 if (mddev->ro) in level_store()
3368 if (mddev->sync_thread || in level_store()
3369 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3370 mddev->reshape_position != MaxSector || in level_store()
3371 mddev->sysfs_active) in level_store()
3375 if (!mddev->pers->quiesce) { in level_store()
3377 mdname(mddev), mddev->pers->name); in level_store()
3401 if (pers == mddev->pers) { in level_store()
3410 mdname(mddev), clevel); in level_store()
3415 rdev_for_each(rdev, mddev) in level_store()
3421 priv = pers->takeover(mddev); in level_store()
3423 mddev->new_level = mddev->level; in level_store()
3424 mddev->new_layout = mddev->layout; in level_store()
3425 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3426 mddev->raid_disks -= mddev->delta_disks; in level_store()
3427 mddev->delta_disks = 0; in level_store()
3428 mddev->reshape_backwards = 0; in level_store()
3431 mdname(mddev), clevel); in level_store()
3437 mddev_suspend(mddev); in level_store()
3438 mddev_detach(mddev); in level_store()
3440 spin_lock(&mddev->lock); in level_store()
3441 oldpers = mddev->pers; in level_store()
3442 oldpriv = mddev->private; in level_store()
3443 mddev->pers = pers; in level_store()
3444 mddev->private = priv; in level_store()
3445 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
3446 mddev->level = mddev->new_level; in level_store()
3447 mddev->layout = mddev->new_layout; in level_store()
3448 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
3449 mddev->delta_disks = 0; in level_store()
3450 mddev->reshape_backwards = 0; in level_store()
3451 mddev->degraded = 0; in level_store()
3452 spin_unlock(&mddev->lock); in level_store()
3455 mddev->external) { in level_store()
3463 mddev->in_sync = 0; in level_store()
3464 mddev->safemode_delay = 0; in level_store()
3465 mddev->safemode = 0; in level_store()
3468 oldpers->free(mddev, oldpriv); in level_store()
3473 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
3476 mdname(mddev)); in level_store()
3477 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
3482 if (mddev->to_remove == NULL) in level_store()
3483 mddev->to_remove = &md_redundancy_group; in level_store()
3486 rdev_for_each(rdev, mddev) { in level_store()
3489 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
3493 sysfs_unlink_rdev(mddev, rdev); in level_store()
3495 rdev_for_each(rdev, mddev) { in level_store()
3504 if (sysfs_link_rdev(mddev, rdev)) in level_store()
3507 rdev->raid_disk, mdname(mddev)); in level_store()
3515 mddev->in_sync = 1; in level_store()
3516 del_timer_sync(&mddev->safemode_timer); in level_store()
3518 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
3519 pers->run(mddev); in level_store()
3520 set_bit(MD_CHANGE_DEVS, &mddev->flags); in level_store()
3521 mddev_resume(mddev); in level_store()
3522 if (!mddev->thread) in level_store()
3523 md_update_sb(mddev, 1); in level_store()
3524 sysfs_notify(&mddev->kobj, NULL, "level"); in level_store()
3525 md_new_event(mddev); in level_store()
3528 mddev_unlock(mddev); in level_store()
3536 layout_show(struct mddev *mddev, char *page) in layout_show() argument
3539 if (mddev->reshape_position != MaxSector && in layout_show()
3540 mddev->layout != mddev->new_layout) in layout_show()
3542 mddev->new_layout, mddev->layout); in layout_show()
3543 return sprintf(page, "%d\n", mddev->layout); in layout_show()
3547 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
3555 err = mddev_lock(mddev); in layout_store()
3559 if (mddev->pers) { in layout_store()
3560 if (mddev->pers->check_reshape == NULL) in layout_store()
3562 else if (mddev->ro) in layout_store()
3565 mddev->new_layout = n; in layout_store()
3566 err = mddev->pers->check_reshape(mddev); in layout_store()
3568 mddev->new_layout = mddev->layout; in layout_store()
3571 mddev->new_layout = n; in layout_store()
3572 if (mddev->reshape_position == MaxSector) in layout_store()
3573 mddev->layout = n; in layout_store()
3575 mddev_unlock(mddev); in layout_store()
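layout_store() above (and chunk_size_store() below) follow a try-and-roll-back pattern on a live array: write the candidate into the new_* field, ask the personality's check_reshape() whether it can honour it, and restore the old value on failure. A condensed sketch of that pattern, assuming kernel context; the specific error codes are assumptions:

static int try_new_layout(struct mddev *mddev, int n)
{
        int err;

        if (mddev->pers->check_reshape == NULL)
                return -EBUSY;
        if (mddev->ro)
                return -EROFS;

        mddev->new_layout = n;
        err = mddev->pers->check_reshape(mddev);
        if (err)
                mddev->new_layout = mddev->layout;      /* roll back */
        return err;
}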
3582 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
3584 if (mddev->raid_disks == 0) in raid_disks_show()
3586 if (mddev->reshape_position != MaxSector && in raid_disks_show()
3587 mddev->delta_disks != 0) in raid_disks_show()
3588 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
3589 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
3590 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
3593 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3596 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
3605 err = mddev_lock(mddev); in raid_disks_store()
3608 if (mddev->pers) in raid_disks_store()
3609 err = update_raid_disks(mddev, n); in raid_disks_store()
3610 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
3612 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
3615 rdev_for_each(rdev, mddev) { in raid_disks_store()
3624 mddev->delta_disks = n - olddisks; in raid_disks_store()
3625 mddev->raid_disks = n; in raid_disks_store()
3626 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
3628 mddev->raid_disks = n; in raid_disks_store()
3630 mddev_unlock(mddev); in raid_disks_store()
3637 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
3639 if (mddev->reshape_position != MaxSector && in chunk_size_show()
3640 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
3642 mddev->new_chunk_sectors << 9, in chunk_size_show()
3643 mddev->chunk_sectors << 9); in chunk_size_show()
3644 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
3648 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
3657 err = mddev_lock(mddev); in chunk_size_store()
3660 if (mddev->pers) { in chunk_size_store()
3661 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
3663 else if (mddev->ro) in chunk_size_store()
3666 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
3667 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
3669 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
3672 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
3673 if (mddev->reshape_position == MaxSector) in chunk_size_store()
3674 mddev->chunk_sectors = n >> 9; in chunk_size_store()
3676 mddev_unlock(mddev); in chunk_size_store()
3683 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
3685 if (mddev->recovery_cp == MaxSector) in resync_start_show()
3687 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
3691 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
3697 err = mddev_lock(mddev); in resync_start_store()
3700 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
3708 mddev->recovery_cp = n; in resync_start_store()
3709 if (mddev->pers) in resync_start_store()
3710 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in resync_start_store()
3712 mddev_unlock(mddev); in resync_start_store()
3771 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
3775 if (mddev->pers) in array_state_show()
3776 switch(mddev->ro) { in array_state_show()
3784 if (mddev->in_sync) in array_state_show()
3786 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) in array_state_show()
3788 else if (mddev->safemode) in array_state_show()
3794 if (list_empty(&mddev->disks) && in array_state_show()
3795 mddev->raid_disks == 0 && in array_state_show()
3796 mddev->dev_sectors == 0) in array_state_show()
3804 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3805 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3806 static int do_md_run(struct mddev *mddev);
3807 static int restart_array(struct mddev *mddev);
3810 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
3815 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { in array_state_store()
3819 spin_lock(&mddev->lock); in array_state_store()
3821 restart_array(mddev); in array_state_store()
3822 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in array_state_store()
3823 wake_up(&mddev->sb_wait); in array_state_store()
3826 restart_array(mddev); in array_state_store()
3827 if (atomic_read(&mddev->writes_pending) == 0) { in array_state_store()
3828 if (mddev->in_sync == 0) { in array_state_store()
3829 mddev->in_sync = 1; in array_state_store()
3830 if (mddev->safemode == 1) in array_state_store()
3831 mddev->safemode = 0; in array_state_store()
3832 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in array_state_store()
3838 spin_unlock(&mddev->lock); in array_state_store()
3841 err = mddev_lock(mddev); in array_state_store()
3850 err = do_md_stop(mddev, 0, NULL); in array_state_store()
3854 if (mddev->pers) in array_state_store()
3855 err = do_md_stop(mddev, 2, NULL); in array_state_store()
3862 if (mddev->pers) in array_state_store()
3863 err = md_set_readonly(mddev, NULL); in array_state_store()
3865 mddev->ro = 1; in array_state_store()
3866 set_disk_ro(mddev->gendisk, 1); in array_state_store()
3867 err = do_md_run(mddev); in array_state_store()
3871 if (mddev->pers) { in array_state_store()
3872 if (mddev->ro == 0) in array_state_store()
3873 err = md_set_readonly(mddev, NULL); in array_state_store()
3874 else if (mddev->ro == 1) in array_state_store()
3875 err = restart_array(mddev); in array_state_store()
3877 mddev->ro = 2; in array_state_store()
3878 set_disk_ro(mddev->gendisk, 0); in array_state_store()
3881 mddev->ro = 2; in array_state_store()
3882 err = do_md_run(mddev); in array_state_store()
3886 if (mddev->pers) { in array_state_store()
3887 restart_array(mddev); in array_state_store()
3888 spin_lock(&mddev->lock); in array_state_store()
3889 if (atomic_read(&mddev->writes_pending) == 0) { in array_state_store()
3890 if (mddev->in_sync == 0) { in array_state_store()
3891 mddev->in_sync = 1; in array_state_store()
3892 if (mddev->safemode == 1) in array_state_store()
3893 mddev->safemode = 0; in array_state_store()
3894 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in array_state_store()
3899 spin_unlock(&mddev->lock); in array_state_store()
3904 if (mddev->pers) { in array_state_store()
3905 restart_array(mddev); in array_state_store()
3906 clear_bit(MD_CHANGE_PENDING, &mddev->flags); in array_state_store()
3907 wake_up(&mddev->sb_wait); in array_state_store()
3910 mddev->ro = 0; in array_state_store()
3911 set_disk_ro(mddev->gendisk, 0); in array_state_store()
3912 err = do_md_run(mddev); in array_state_store()
3922 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
3923 mddev->hold_active = 0; in array_state_store()
3924 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
3926 mddev_unlock(mddev); in array_state_store()
3933 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
3935 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
3939 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
3945 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
3956 null_show(struct mddev *mddev, char *page) in null_show() argument
3962 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
3990 err = mddev_lock(mddev); in new_dev_store()
3993 if (mddev->persistent) { in new_dev_store()
3994 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
3995 mddev->minor_version); in new_dev_store()
3996 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
3998 = list_entry(mddev->disks.next, in new_dev_store()
4000 err = super_types[mddev->major_version] in new_dev_store()
4001 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4005 } else if (mddev->external) in new_dev_store()
4011 mddev_unlock(mddev); in new_dev_store()
4014 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4018 mddev_unlock(mddev); in new_dev_store()
4026 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4032 err = mddev_lock(mddev); in bitmap_store()
4035 if (!mddev->bitmap) in bitmap_store()
4047 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4050 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4052 mddev_unlock(mddev); in bitmap_store()
4060 size_show(struct mddev *mddev, char *page) in size_show() argument
4063 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4066 static int update_size(struct mddev *mddev, sector_t num_sectors);
4069 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4080 err = mddev_lock(mddev); in size_store()
4083 if (mddev->pers) { in size_store()
4084 if (mddev_is_clustered(mddev)) in size_store()
4085 md_cluster_ops->metadata_update_start(mddev); in size_store()
4086 err = update_size(mddev, sectors); in size_store()
4087 md_update_sb(mddev, 1); in size_store()
4088 if (mddev_is_clustered(mddev)) in size_store()
4089 md_cluster_ops->metadata_update_finish(mddev); in size_store()
4091 if (mddev->dev_sectors == 0 || in size_store()
4092 mddev->dev_sectors > sectors) in size_store()
4093 mddev->dev_sectors = sectors; in size_store()
4097 mddev_unlock(mddev); in size_store()
4111 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4113 if (mddev->persistent) in metadata_show()
4115 mddev->major_version, mddev->minor_version); in metadata_show()
4116 else if (mddev->external) in metadata_show()
4117 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4123 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4133 err = mddev_lock(mddev); in metadata_store()
4137 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4139 else if (!list_empty(&mddev->disks)) in metadata_store()
4144 mddev->persistent = 0; in metadata_store()
4145 mddev->external = 0; in metadata_store()
4146 mddev->major_version = 0; in metadata_store()
4147 mddev->minor_version = 90; in metadata_store()
4152 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4153 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4154 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4155 mddev->metadata_type[namelen] = 0; in metadata_store()
4156 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4157 mddev->metadata_type[--namelen] = 0; in metadata_store()
4158 mddev->persistent = 0; in metadata_store()
4159 mddev->external = 1; in metadata_store()
4160 mddev->major_version = 0; in metadata_store()
4161 mddev->minor_version = 90; in metadata_store()
4175 mddev->major_version = major; in metadata_store()
4176 mddev->minor_version = minor; in metadata_store()
4177 mddev->persistent = 1; in metadata_store()
4178 mddev->external = 0; in metadata_store()
4181 mddev_unlock(mddev); in metadata_store()
4189 action_show(struct mddev *mddev, char *page) in action_show() argument
4192 unsigned long recovery = mddev->recovery; in action_show()
4196 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4213 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4215 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4221 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4223 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4224 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in action_store()
4225 mddev_lock(mddev) == 0) { in action_store()
4227 if (mddev->sync_thread) { in action_store()
4228 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4229 md_reap_sync_thread(mddev); in action_store()
4231 mddev_unlock(mddev); in action_store()
4233 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in action_store()
4234 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in action_store()
4237 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4239 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4240 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4243 if (mddev->pers->start_reshape == NULL) in action_store()
4245 err = mddev_lock(mddev); in action_store()
4247 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4248 err = mddev->pers->start_reshape(mddev); in action_store()
4249 mddev_unlock(mddev); in action_store()
4253 sysfs_notify(&mddev->kobj, NULL, "degraded"); in action_store()
4256 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4259 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4260 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4261 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4263 if (mddev->ro == 2) { in action_store()
4267 mddev->ro = 0; in action_store()
4268 md_wakeup_thread(mddev->sync_thread); in action_store()
4270 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4271 md_wakeup_thread(mddev->thread); in action_store()
4272 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
4280 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4282 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4288 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4292 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4298 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4300 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4301 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4305 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4310 mddev->sync_speed_min = 0; in sync_min_store()
4316 mddev->sync_speed_min = min; in sync_min_store()
4324 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4326 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4327 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4331 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4336 mddev->sync_speed_max = 0; in sync_max_store()
4342 mddev->sync_speed_max = max; in sync_max_store()
4350 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4352 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
4357 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
4359 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
4363 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
4373 mddev->parallel_resync = n; in sync_force_parallel_store()
4375 if (mddev->sync_thread) in sync_force_parallel_store()
4387 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
4390 if (mddev->curr_resync == 0) in sync_speed_show()
4392 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
4393 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
4395 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
4402 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
4406 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
4409 if (mddev->curr_resync == 1 || in sync_completed_show()
4410 mddev->curr_resync == 2) in sync_completed_show()
4413 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
4414 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
4415 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
4417 max_sectors = mddev->dev_sectors; in sync_completed_show()
4419 resync = mddev->curr_resync_completed; in sync_completed_show()
4427 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
4430 (unsigned long long)mddev->resync_min); in min_sync_show()
4433 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
4441 spin_lock(&mddev->lock); in min_sync_store()
4443 if (min > mddev->resync_max) in min_sync_store()
4447 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
4451 mddev->resync_min = round_down(min, 8); in min_sync_store()
4455 spin_unlock(&mddev->lock); in min_sync_store()
4463 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
4465 if (mddev->resync_max == MaxSector) in max_sync_show()
4469 (unsigned long long)mddev->resync_max); in max_sync_show()
4472 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
4475 spin_lock(&mddev->lock); in max_sync_store()
4477 mddev->resync_max = MaxSector; in max_sync_store()
4485 if (max < mddev->resync_min) in max_sync_store()
4489 if (max < mddev->resync_max && in max_sync_store()
4490 mddev->ro == 0 && in max_sync_store()
4491 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
4495 chunk = mddev->chunk_sectors; in max_sync_store()
4503 mddev->resync_max = max; in max_sync_store()
4505 wake_up(&mddev->recovery_wait); in max_sync_store()
4508 spin_unlock(&mddev->lock); in max_sync_store()
4516 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
4518 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
4522 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
4532 err = mddev_lock(mddev); in suspend_lo_store()
4536 if (mddev->pers == NULL || in suspend_lo_store()
4537 mddev->pers->quiesce == NULL) in suspend_lo_store()
4539 old = mddev->suspend_lo; in suspend_lo_store()
4540 mddev->suspend_lo = new; in suspend_lo_store()
4543 mddev->pers->quiesce(mddev, 2); in suspend_lo_store()
4546 mddev->pers->quiesce(mddev, 1); in suspend_lo_store()
4547 mddev->pers->quiesce(mddev, 0); in suspend_lo_store()
4551 mddev_unlock(mddev); in suspend_lo_store()
4558 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
4560 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
4564 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
4574 err = mddev_lock(mddev); in suspend_hi_store()
4578 if (mddev->pers == NULL || in suspend_hi_store()
4579 mddev->pers->quiesce == NULL) in suspend_hi_store()
4581 old = mddev->suspend_hi; in suspend_hi_store()
4582 mddev->suspend_hi = new; in suspend_hi_store()
4585 mddev->pers->quiesce(mddev, 2); in suspend_hi_store()
4588 mddev->pers->quiesce(mddev, 1); in suspend_hi_store()
4589 mddev->pers->quiesce(mddev, 0); in suspend_hi_store()
4593 mddev_unlock(mddev); in suspend_hi_store()
4600 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
4602 if (mddev->reshape_position != MaxSector) in reshape_position_show()
4604 (unsigned long long)mddev->reshape_position); in reshape_position_show()
4610 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
4619 err = mddev_lock(mddev); in reshape_position_store()
4623 if (mddev->pers) in reshape_position_store()
4625 mddev->reshape_position = new; in reshape_position_store()
4626 mddev->delta_disks = 0; in reshape_position_store()
4627 mddev->reshape_backwards = 0; in reshape_position_store()
4628 mddev->new_level = mddev->level; in reshape_position_store()
4629 mddev->new_layout = mddev->layout; in reshape_position_store()
4630 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
4631 rdev_for_each(rdev, mddev) in reshape_position_store()
4635 mddev_unlock(mddev); in reshape_position_store()
4644 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
4647 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
4651 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
4662 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
4665 err = mddev_lock(mddev); in reshape_direction_store()
4669 if (mddev->delta_disks) in reshape_direction_store()
4671 else if (mddev->persistent && in reshape_direction_store()
4672 mddev->major_version == 0) in reshape_direction_store()
4675 mddev->reshape_backwards = backwards; in reshape_direction_store()
4676 mddev_unlock(mddev); in reshape_direction_store()
4685 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
4687 if (mddev->external_size) in array_size_show()
4689 (unsigned long long)mddev->array_sectors/2); in array_size_show()
4695 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
4700 err = mddev_lock(mddev); in array_size_store()
4705 if (mddev->pers) in array_size_store()
4706 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
4708 sectors = mddev->array_sectors; in array_size_store()
4710 mddev->external_size = 0; in array_size_store()
4714 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
4717 mddev->external_size = 1; in array_size_store()
4721 mddev->array_sectors = sectors; in array_size_store()
4722 if (mddev->pers) { in array_size_store()
4723 set_capacity(mddev->gendisk, mddev->array_sectors); in array_size_store()
4724 revalidate_disk(mddev->gendisk); in array_size_store()
4727 mddev_unlock(mddev); in array_size_store()
4779 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
4785 if (list_empty(&mddev->all_mddevs)) { in md_attr_show()
4789 mddev_get(mddev); in md_attr_show()
4792 rv = entry->show(mddev, page); in md_attr_show()
4793 mddev_put(mddev); in md_attr_show()
4802 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
4810 if (list_empty(&mddev->all_mddevs)) { in md_attr_store()
4814 mddev_get(mddev); in md_attr_store()
4816 rv = entry->store(mddev, page, length); in md_attr_store()
4817 mddev_put(mddev); in md_attr_store()
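
md_attr_show() and md_attr_store() above are the generic sysfs entry points for every attribute in this file: they recover the mddev from the embedded kobject, pin it with mddev_get()/mddev_put(), and forward to the per-attribute show or store callback. The sketch below is a plain userspace analogue of that dispatch-table pattern, for illustration only; it is not kernel code:

/* Userspace analogue (illustrative only) of the dispatch done by
 * md_attr_show()/md_attr_store(): each attribute carries show/store
 * callbacks and the generic entry point simply forwards to them. */
#include <stdio.h>

struct fake_mddev { int sync_speed_max; };

struct attr_entry {
	const char *name;
	int (*show)(struct fake_mddev *m, char *page);
	int (*store)(struct fake_mddev *m, const char *buf);
};

static int speed_show(struct fake_mddev *m, char *page)
{
	return sprintf(page, "%d\n", m->sync_speed_max);
}

static int speed_store(struct fake_mddev *m, const char *buf)
{
	return sscanf(buf, "%d", &m->sync_speed_max) == 1 ? 0 : -1;
}

static const struct attr_entry table[] = {
	{ "sync_speed_max", speed_show, speed_store },
};

int main(void)
{
	struct fake_mddev m = { 200000 };
	char page[64];

	/* "store" then "show", as md_attr_store()/md_attr_show() would. */
	table[0].store(&m, "50000\n");
	table[0].show(&m, page);
	printf("%s: %s", table[0].name, page);
	return 0;
}
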
4823 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_free() local
4825 if (mddev->sysfs_state) in md_free()
4826 sysfs_put(mddev->sysfs_state); in md_free()
4828 if (mddev->queue) in md_free()
4829 blk_cleanup_queue(mddev->queue); in md_free()
4830 if (mddev->gendisk) { in md_free()
4831 del_gendisk(mddev->gendisk); in md_free()
4832 put_disk(mddev->gendisk); in md_free()
4835 kfree(mddev); in md_free()
4852 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
4854 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); in mddev_delayed_delete()
4855 kobject_del(&mddev->kobj); in mddev_delayed_delete()
4856 kobject_put(&mddev->kobj); in mddev_delayed_delete()
4862 struct mddev *mddev = mddev_find(dev); in md_alloc() local
4869 if (!mddev) in md_alloc()
4872 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
4874 unit = MINOR(mddev->unit) >> shift; in md_alloc()
4883 if (mddev->gendisk) in md_alloc()
4889 struct mddev *mddev2; in md_alloc()
4902 mddev->queue = blk_alloc_queue(GFP_KERNEL); in md_alloc()
4903 if (!mddev->queue) in md_alloc()
4905 mddev->queue->queuedata = mddev; in md_alloc()
4907 blk_queue_make_request(mddev->queue, md_make_request); in md_alloc()
4908 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
4912 blk_cleanup_queue(mddev->queue); in md_alloc()
4913 mddev->queue = NULL; in md_alloc()
4916 disk->major = MAJOR(mddev->unit); in md_alloc()
4925 disk->private_data = mddev; in md_alloc()
4926 disk->queue = mddev->queue; in md_alloc()
4927 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); in md_alloc()
4933 mddev->gendisk = disk; in md_alloc()
4937 mutex_lock(&mddev->open_mutex); in md_alloc()
4940 error = kobject_init_and_add(&mddev->kobj, &md_ktype, in md_alloc()
4950 if (mddev->kobj.sd && in md_alloc()
4951 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) in md_alloc()
4953 mutex_unlock(&mddev->open_mutex); in md_alloc()
4956 if (!error && mddev->kobj.sd) { in md_alloc()
4957 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
4958 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
4960 mddev_put(mddev); in md_alloc()
4991 struct mddev *mddev = (struct mddev *) data; in md_safemode_timeout() local
4993 if (!atomic_read(&mddev->writes_pending)) { in md_safemode_timeout()
4994 mddev->safemode = 1; in md_safemode_timeout()
4995 if (mddev->external) in md_safemode_timeout()
4996 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
4998 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5003 int md_run(struct mddev *mddev) in md_run() argument
5009 if (list_empty(&mddev->disks)) in md_run()
5013 if (mddev->pers) in md_run()
5016 if (mddev->sysfs_active) in md_run()
5022 if (!mddev->raid_disks) { in md_run()
5023 if (!mddev->persistent) in md_run()
5025 analyze_sbs(mddev); in md_run()
5028 if (mddev->level != LEVEL_NONE) in md_run()
5029 request_module("md-level-%d", mddev->level); in md_run()
5030 else if (mddev->clevel[0]) in md_run()
5031 request_module("md-%s", mddev->clevel); in md_run()
5038 rdev_for_each(rdev, mddev) { in md_run()
5051 if (mddev->dev_sectors && in md_run()
5052 rdev->data_offset + mddev->dev_sectors in md_run()
5055 mdname(mddev)); in md_run()
5062 mdname(mddev)); in md_run()
5069 if (mddev->bio_set == NULL) in md_run()
5070 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0); in md_run()
5073 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5076 if (mddev->level != LEVEL_NONE) in md_run()
5078 mddev->level); in md_run()
5081 mddev->clevel); in md_run()
5085 if (mddev->level != pers->level) { in md_run()
5086 mddev->level = pers->level; in md_run()
5087 mddev->new_level = pers->level; in md_run()
5089 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5091 if (mddev->reshape_position != MaxSector && in md_run()
5106 rdev_for_each(rdev, mddev) in md_run()
5107 rdev_for_each(rdev2, mddev) { in md_run()
5115 mdname(mddev), in md_run()
5128 mddev->recovery = 0; in md_run()
5130 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5132 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5134 if (start_readonly && mddev->ro == 0) in md_run()
5135 mddev->ro = 2; /* read-only, but switch on first write */ in md_run()
5137 err = pers->run(mddev); in md_run()
5140 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5141 WARN_ONCE(!mddev->external_size, "%s: default size too small," in md_run()
5145 (unsigned long long)mddev->array_sectors / 2, in md_run()
5146 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5150 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5153 bitmap = bitmap_create(mddev, -1); in md_run()
5157 mdname(mddev), err); in md_run()
5159 mddev->bitmap = bitmap; in md_run()
5163 mddev_detach(mddev); in md_run()
5164 if (mddev->private) in md_run()
5165 pers->free(mddev, mddev->private); in md_run()
5166 mddev->private = NULL; in md_run()
5168 bitmap_destroy(mddev); in md_run()
5171 if (mddev->queue) { in md_run()
5172 mddev->queue->backing_dev_info.congested_data = mddev; in md_run()
5173 mddev->queue->backing_dev_info.congested_fn = md_congested; in md_run()
5174 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec); in md_run()
5177 if (mddev->kobj.sd && in md_run()
5178 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
5181 mdname(mddev)); in md_run()
5182 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
5183 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ in md_run()
5184 mddev->ro = 0; in md_run()
5186 atomic_set(&mddev->writes_pending,0); in md_run()
5187 atomic_set(&mddev->max_corr_read_errors, in md_run()
5189 mddev->safemode = 0; in md_run()
5190 mddev->safemode_timer.function = md_safemode_timeout; in md_run()
5191 mddev->safemode_timer.data = (unsigned long) mddev; in md_run()
5192 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ in md_run()
5193 mddev->in_sync = 1; in md_run()
5195 spin_lock(&mddev->lock); in md_run()
5196 mddev->pers = pers; in md_run()
5197 mddev->ready = 1; in md_run()
5198 spin_unlock(&mddev->lock); in md_run()
5199 rdev_for_each(rdev, mddev) in md_run()
5201 if (sysfs_link_rdev(mddev, rdev)) in md_run()
5204 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
5206 if (mddev->flags & MD_UPDATE_SB_FLAGS) in md_run()
5207 md_update_sb(mddev, 0); in md_run()
5209 md_new_event(mddev); in md_run()
5210 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_run()
5211 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_run()
5212 sysfs_notify(&mddev->kobj, NULL, "degraded"); in md_run()
5217 static int do_md_run(struct mddev *mddev) in do_md_run() argument
5221 err = md_run(mddev); in do_md_run()
5224 err = bitmap_load(mddev); in do_md_run()
5226 bitmap_destroy(mddev); in do_md_run()
5230 md_wakeup_thread(mddev->thread); in do_md_run()
5231 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
5233 set_capacity(mddev->gendisk, mddev->array_sectors); in do_md_run()
5234 revalidate_disk(mddev->gendisk); in do_md_run()
5235 mddev->changed = 1; in do_md_run()
5236 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
5241 static int restart_array(struct mddev *mddev) in restart_array() argument
5243 struct gendisk *disk = mddev->gendisk; in restart_array()
5246 if (list_empty(&mddev->disks)) in restart_array()
5248 if (!mddev->pers) in restart_array()
5250 if (!mddev->ro) in restart_array()
5252 mddev->safemode = 0; in restart_array()
5253 mddev->ro = 0; in restart_array()
5256 mdname(mddev)); in restart_array()
5258 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
5259 md_wakeup_thread(mddev->thread); in restart_array()
5260 md_wakeup_thread(mddev->sync_thread); in restart_array()
5261 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
5265 static void md_clean(struct mddev *mddev) in md_clean() argument
5267 mddev->array_sectors = 0; in md_clean()
5268 mddev->external_size = 0; in md_clean()
5269 mddev->dev_sectors = 0; in md_clean()
5270 mddev->raid_disks = 0; in md_clean()
5271 mddev->recovery_cp = 0; in md_clean()
5272 mddev->resync_min = 0; in md_clean()
5273 mddev->resync_max = MaxSector; in md_clean()
5274 mddev->reshape_position = MaxSector; in md_clean()
5275 mddev->external = 0; in md_clean()
5276 mddev->persistent = 0; in md_clean()
5277 mddev->level = LEVEL_NONE; in md_clean()
5278 mddev->clevel[0] = 0; in md_clean()
5279 mddev->flags = 0; in md_clean()
5280 mddev->ro = 0; in md_clean()
5281 mddev->metadata_type[0] = 0; in md_clean()
5282 mddev->chunk_sectors = 0; in md_clean()
5283 mddev->ctime = mddev->utime = 0; in md_clean()
5284 mddev->layout = 0; in md_clean()
5285 mddev->max_disks = 0; in md_clean()
5286 mddev->events = 0; in md_clean()
5287 mddev->can_decrease_events = 0; in md_clean()
5288 mddev->delta_disks = 0; in md_clean()
5289 mddev->reshape_backwards = 0; in md_clean()
5290 mddev->new_level = LEVEL_NONE; in md_clean()
5291 mddev->new_layout = 0; in md_clean()
5292 mddev->new_chunk_sectors = 0; in md_clean()
5293 mddev->curr_resync = 0; in md_clean()
5294 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
5295 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
5296 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
5297 mddev->recovery = 0; in md_clean()
5298 mddev->in_sync = 0; in md_clean()
5299 mddev->changed = 0; in md_clean()
5300 mddev->degraded = 0; in md_clean()
5301 mddev->safemode = 0; in md_clean()
5302 mddev->private = NULL; in md_clean()
5303 mddev->merge_check_needed = 0; in md_clean()
5304 mddev->bitmap_info.offset = 0; in md_clean()
5305 mddev->bitmap_info.default_offset = 0; in md_clean()
5306 mddev->bitmap_info.default_space = 0; in md_clean()
5307 mddev->bitmap_info.chunksize = 0; in md_clean()
5308 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
5309 mddev->bitmap_info.max_write_behind = 0; in md_clean()
5312 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
5314 if (mddev_is_clustered(mddev)) in __md_stop_writes()
5315 md_cluster_ops->metadata_update_start(mddev); in __md_stop_writes()
5316 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
5318 if (mddev->sync_thread) { in __md_stop_writes()
5319 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
5320 md_reap_sync_thread(mddev); in __md_stop_writes()
5323 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
5325 bitmap_flush(mddev); in __md_stop_writes()
5326 md_super_wait(mddev); in __md_stop_writes()
5328 if (mddev->ro == 0 && in __md_stop_writes()
5329 (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { in __md_stop_writes()
5331 mddev->in_sync = 1; in __md_stop_writes()
5332 md_update_sb(mddev, 1); in __md_stop_writes()
5334 if (mddev_is_clustered(mddev)) in __md_stop_writes()
5335 md_cluster_ops->metadata_update_finish(mddev); in __md_stop_writes()
5338 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
5340 mddev_lock_nointr(mddev); in md_stop_writes()
5341 __md_stop_writes(mddev); in md_stop_writes()
5342 mddev_unlock(mddev); in md_stop_writes()
5346 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
5348 struct bitmap *bitmap = mddev->bitmap; in mddev_detach()
5352 mdname(mddev)); in mddev_detach()
5357 if (mddev->pers && mddev->pers->quiesce) { in mddev_detach()
5358 mddev->pers->quiesce(mddev, 1); in mddev_detach()
5359 mddev->pers->quiesce(mddev, 0); in mddev_detach()
5361 md_unregister_thread(&mddev->thread); in mddev_detach()
5362 if (mddev->queue) in mddev_detach()
5363 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
5366 static void __md_stop(struct mddev *mddev) in __md_stop() argument
5368 struct md_personality *pers = mddev->pers; in __md_stop()
5369 mddev_detach(mddev); in __md_stop()
5372 spin_lock(&mddev->lock); in __md_stop()
5373 mddev->ready = 0; in __md_stop()
5374 mddev->pers = NULL; in __md_stop()
5375 spin_unlock(&mddev->lock); in __md_stop()
5376 pers->free(mddev, mddev->private); in __md_stop()
5377 mddev->private = NULL; in __md_stop()
5378 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
5379 mddev->to_remove = &md_redundancy_group; in __md_stop()
5381 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
5384 void md_stop(struct mddev *mddev) in md_stop() argument
5389 __md_stop(mddev); in md_stop()
5390 bitmap_destroy(mddev); in md_stop()
5391 if (mddev->bio_set) in md_stop()
5392 bioset_free(mddev->bio_set); in md_stop()
5397 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
5402 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
5404 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5405 md_wakeup_thread(mddev->thread); in md_set_readonly()
5407 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
5408 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
5409 if (mddev->sync_thread) in md_set_readonly()
5412 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
5414 mddev_unlock(mddev); in md_set_readonly()
5416 &mddev->recovery)); in md_set_readonly()
5417 mddev_lock_nointr(mddev); in md_set_readonly()
5419 mutex_lock(&mddev->open_mutex); in md_set_readonly()
5420 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
5421 mddev->sync_thread || in md_set_readonly()
5422 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in md_set_readonly()
5423 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { in md_set_readonly()
5424 printk("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
5426 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5427 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
5428 md_wakeup_thread(mddev->thread); in md_set_readonly()
5433 if (mddev->pers) { in md_set_readonly()
5434 __md_stop_writes(mddev); in md_set_readonly()
5437 if (mddev->ro==1) in md_set_readonly()
5439 mddev->ro = 1; in md_set_readonly()
5440 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
5441 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
5442 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
5443 md_wakeup_thread(mddev->thread); in md_set_readonly()
5444 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
5448 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
5456 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
5459 struct gendisk *disk = mddev->gendisk; in do_md_stop()
5463 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
5465 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
5466 md_wakeup_thread(mddev->thread); in do_md_stop()
5468 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
5469 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
5470 if (mddev->sync_thread) in do_md_stop()
5473 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
5475 mddev_unlock(mddev); in do_md_stop()
5476 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
5478 &mddev->recovery))); in do_md_stop()
5479 mddev_lock_nointr(mddev); in do_md_stop()
5481 mutex_lock(&mddev->open_mutex); in do_md_stop()
5482 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
5483 mddev->sysfs_active || in do_md_stop()
5484 mddev->sync_thread || in do_md_stop()
5485 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in do_md_stop()
5486 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { in do_md_stop()
5487 printk("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
5488 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5490 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
5491 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
5492 md_wakeup_thread(mddev->thread); in do_md_stop()
5496 if (mddev->pers) { in do_md_stop()
5497 if (mddev->ro) in do_md_stop()
5500 __md_stop_writes(mddev); in do_md_stop()
5501 __md_stop(mddev); in do_md_stop()
5502 mddev->queue->merge_bvec_fn = NULL; in do_md_stop()
5503 mddev->queue->backing_dev_info.congested_fn = NULL; in do_md_stop()
5506 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
5508 rdev_for_each(rdev, mddev) in do_md_stop()
5510 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
5513 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5514 mddev->changed = 1; in do_md_stop()
5517 if (mddev->ro) in do_md_stop()
5518 mddev->ro = 0; in do_md_stop()
5520 mutex_unlock(&mddev->open_mutex); in do_md_stop()
5525 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); in do_md_stop()
5527 bitmap_destroy(mddev); in do_md_stop()
5528 if (mddev->bitmap_info.file) { in do_md_stop()
5529 struct file *f = mddev->bitmap_info.file; in do_md_stop()
5530 spin_lock(&mddev->lock); in do_md_stop()
5531 mddev->bitmap_info.file = NULL; in do_md_stop()
5532 spin_unlock(&mddev->lock); in do_md_stop()
5535 mddev->bitmap_info.offset = 0; in do_md_stop()
5537 export_array(mddev); in do_md_stop()
5539 md_clean(mddev); in do_md_stop()
5540 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_stop()
5541 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
5542 mddev->hold_active = 0; in do_md_stop()
5545 md_new_event(mddev); in do_md_stop()
5546 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
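
do_md_stop() above is the teardown path behind the STOP_ARRAY ioctl (md_ioctl() further down calls it with mode 0): it refuses while extra openers, a running sync thread, or sysfs activity keep the array busy, then stops writes, detaches the personality and, for a full stop, exports the member devices. A hedged userspace sketch of stopping an idle array; /dev/md0 is an assumed name:

/* Hedged sketch: stop an assembled, otherwise unused array the way
 * mdadm --stop does, via the STOP_ARRAY ioctl serviced by do_md_stop()
 * above.  Needs root. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	/* Exclusive open: the openers check in do_md_stop() rejects busy arrays. */
	int fd = open("/dev/md0", O_RDONLY | O_EXCL);

	if (fd < 0) {
		perror("/dev/md0");
		return 1;
	}
	if (ioctl(fd, STOP_ARRAY, NULL) < 0) {
		perror("STOP_ARRAY");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
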
5551 static void autorun_array(struct mddev *mddev) in autorun_array() argument
5556 if (list_empty(&mddev->disks)) in autorun_array()
5561 rdev_for_each(rdev, mddev) { in autorun_array()
5567 err = do_md_run(mddev); in autorun_array()
5570 do_md_stop(mddev, 0, NULL); in autorun_array()
5589 struct mddev *mddev; in autorun_devices() local
5629 mddev = mddev_find(dev); in autorun_devices()
5630 if (!mddev || !mddev->gendisk) { in autorun_devices()
5631 if (mddev) in autorun_devices()
5632 mddev_put(mddev); in autorun_devices()
5637 if (mddev_lock(mddev)) in autorun_devices()
5639 mdname(mddev)); in autorun_devices()
5640 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
5641 || !list_empty(&mddev->disks)) { in autorun_devices()
5644 mdname(mddev), bdevname(rdev0->bdev,b)); in autorun_devices()
5645 mddev_unlock(mddev); in autorun_devices()
5647 printk(KERN_INFO "md: created %s\n", mdname(mddev)); in autorun_devices()
5648 mddev->persistent = 1; in autorun_devices()
5651 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
5654 autorun_array(mddev); in autorun_devices()
5655 mddev_unlock(mddev); in autorun_devices()
5664 mddev_put(mddev); in autorun_devices()
5684 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
5692 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
5706 info.major_version = mddev->major_version; in get_array_info()
5707 info.minor_version = mddev->minor_version; in get_array_info()
5709 info.ctime = mddev->ctime; in get_array_info()
5710 info.level = mddev->level; in get_array_info()
5711 info.size = mddev->dev_sectors / 2; in get_array_info()
5712 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
5715 info.raid_disks = mddev->raid_disks; in get_array_info()
5716 info.md_minor = mddev->md_minor; in get_array_info()
5717 info.not_persistent= !mddev->persistent; in get_array_info()
5719 info.utime = mddev->utime; in get_array_info()
5721 if (mddev->in_sync) in get_array_info()
5723 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
5725 if (mddev_is_clustered(mddev)) in get_array_info()
5732 info.layout = mddev->layout; in get_array_info()
5733 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
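
get_array_info() above fills the mdu_array_info_t returned by the GET_ARRAY_INFO ioctl, counting active, working, failed and spare members under RCU. A minimal reader, assuming an assembled array at /dev/md0:

/* Hedged sketch: read the array summary that get_array_info() above
 * produces. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		return 1;
	}
	printf("level %d, %d raid disks (%d active, %d working, %d failed, %d spare), chunk %d bytes\n",
	       info.level, info.raid_disks, info.active_disks,
	       info.working_disks, info.failed_disks, info.spare_disks,
	       info.chunk_size);
	close(fd);
	return 0;
}
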
5741 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
5752 spin_lock(&mddev->lock); in get_bitmap_file()
5754 if (!mddev->bitmap_info.file) in get_bitmap_file()
5756 else if ((ptr = d_path(&mddev->bitmap_info.file->f_path, in get_bitmap_file()
5763 spin_unlock(&mddev->lock); in get_bitmap_file()
5773 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
5782 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
5809 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) in add_new_disk() argument
5815 if (mddev_is_clustered(mddev) && in add_new_disk()
5818 mdname(mddev)); in add_new_disk()
5825 if (!mddev->raid_disks) { in add_new_disk()
5828 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in add_new_disk()
5835 if (!list_empty(&mddev->disks)) { in add_new_disk()
5837 = list_entry(mddev->disks.next, in add_new_disk()
5839 err = super_types[mddev->major_version] in add_new_disk()
5840 .load_super(rdev, rdev0, mddev->minor_version); in add_new_disk()
5850 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5861 if (mddev->pers) { in add_new_disk()
5863 if (!mddev->pers->hot_add_disk) { in add_new_disk()
5866 mdname(mddev)); in add_new_disk()
5869 if (mddev->persistent) in add_new_disk()
5870 rdev = md_import_device(dev, mddev->major_version, in add_new_disk()
5871 mddev->minor_version); in add_new_disk()
5881 if (!mddev->persistent) { in add_new_disk()
5883 info->raid_disk < mddev->raid_disks) { in add_new_disk()
5891 super_types[mddev->major_version]. in add_new_disk()
5892 validate_super(mddev, rdev); in add_new_disk()
5911 if (mddev_is_clustered(mddev)) { in add_new_disk()
5915 err = md_cluster_ops->new_disk_ack(mddev, true); in add_new_disk()
5922 err = md_cluster_ops->add_new_disk_start(mddev, rdev); in add_new_disk()
5924 md_cluster_ops->add_new_disk_finish(mddev); in add_new_disk()
5932 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5937 if (mddev_is_clustered(mddev) && in add_new_disk()
5939 md_cluster_ops->add_new_disk_finish(mddev); in add_new_disk()
5946 if (mddev->major_version != 0) { in add_new_disk()
5948 mdname(mddev)); in add_new_disk()
5962 if (info->raid_disk < mddev->raid_disks) in add_new_disk()
5967 if (rdev->raid_disk < mddev->raid_disks) in add_new_disk()
5974 if (!mddev->persistent) { in add_new_disk()
5981 err = bind_rdev_to_array(rdev, mddev); in add_new_disk()
5991 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
5996 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6000 if (mddev_is_clustered(mddev)) in hot_remove_disk()
6001 md_cluster_ops->metadata_update_start(mddev); in hot_remove_disk()
6004 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6009 if (mddev_is_clustered(mddev)) in hot_remove_disk()
6010 md_cluster_ops->remove_disk(mddev, rdev); in hot_remove_disk()
6013 md_update_sb(mddev, 1); in hot_remove_disk()
6014 md_new_event(mddev); in hot_remove_disk()
6016 if (mddev_is_clustered(mddev)) in hot_remove_disk()
6017 md_cluster_ops->metadata_update_finish(mddev); in hot_remove_disk()
6021 if (mddev_is_clustered(mddev)) in hot_remove_disk()
6022 md_cluster_ops->metadata_update_cancel(mddev); in hot_remove_disk()
6024 bdevname(rdev->bdev,b), mdname(mddev)); in hot_remove_disk()
6028 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6034 if (!mddev->pers) in hot_add_disk()
6037 if (mddev->major_version != 0) { in hot_add_disk()
6040 mdname(mddev)); in hot_add_disk()
6043 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6046 mdname(mddev)); in hot_add_disk()
6058 if (mddev->persistent) in hot_add_disk()
6068 bdevname(rdev->bdev,b), mdname(mddev)); in hot_add_disk()
6073 if (mddev_is_clustered(mddev)) in hot_add_disk()
6074 md_cluster_ops->metadata_update_start(mddev); in hot_add_disk()
6078 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6089 md_update_sb(mddev, 1); in hot_add_disk()
6091 if (mddev_is_clustered(mddev)) in hot_add_disk()
6092 md_cluster_ops->metadata_update_finish(mddev); in hot_add_disk()
6097 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
6098 md_wakeup_thread(mddev->thread); in hot_add_disk()
6099 md_new_event(mddev); in hot_add_disk()
6103 if (mddev_is_clustered(mddev)) in hot_add_disk()
6104 md_cluster_ops->metadata_update_cancel(mddev); in hot_add_disk()
6110 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
6114 if (mddev->pers) { in set_bitmap_file()
6115 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
6117 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
6126 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
6132 mdname(mddev)); in set_bitmap_file()
6139 mdname(mddev)); in set_bitmap_file()
6143 mdname(mddev)); in set_bitmap_file()
6147 mdname(mddev)); in set_bitmap_file()
6154 mddev->bitmap_info.file = f; in set_bitmap_file()
6155 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
6156 } else if (mddev->bitmap == NULL) in set_bitmap_file()
6159 if (mddev->pers) { in set_bitmap_file()
6160 mddev->pers->quiesce(mddev, 1); in set_bitmap_file()
6164 bitmap = bitmap_create(mddev, -1); in set_bitmap_file()
6166 mddev->bitmap = bitmap; in set_bitmap_file()
6167 err = bitmap_load(mddev); in set_bitmap_file()
6172 bitmap_destroy(mddev); in set_bitmap_file()
6175 mddev->pers->quiesce(mddev, 0); in set_bitmap_file()
6178 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
6180 spin_lock(&mddev->lock); in set_bitmap_file()
6181 mddev->bitmap_info.file = NULL; in set_bitmap_file()
6182 spin_unlock(&mddev->lock); in set_bitmap_file()
6203 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) in set_array_info() argument
6217 mddev->major_version = info->major_version; in set_array_info()
6218 mddev->minor_version = info->minor_version; in set_array_info()
6219 mddev->patch_version = info->patch_version; in set_array_info()
6220 mddev->persistent = !info->not_persistent; in set_array_info()
6224 mddev->ctime = get_seconds(); in set_array_info()
6227 mddev->major_version = MD_MAJOR_VERSION; in set_array_info()
6228 mddev->minor_version = MD_MINOR_VERSION; in set_array_info()
6229 mddev->patch_version = MD_PATCHLEVEL_VERSION; in set_array_info()
6230 mddev->ctime = get_seconds(); in set_array_info()
6232 mddev->level = info->level; in set_array_info()
6233 mddev->clevel[0] = 0; in set_array_info()
6234 mddev->dev_sectors = 2 * (sector_t)info->size; in set_array_info()
6235 mddev->raid_disks = info->raid_disks; in set_array_info()
6240 mddev->recovery_cp = MaxSector; in set_array_info()
6242 mddev->recovery_cp = 0; in set_array_info()
6243 mddev->persistent = ! info->not_persistent; in set_array_info()
6244 mddev->external = 0; in set_array_info()
6246 mddev->layout = info->layout; in set_array_info()
6247 mddev->chunk_sectors = info->chunk_size >> 9; in set_array_info()
6249 mddev->max_disks = MD_SB_DISKS; in set_array_info()
6251 if (mddev->persistent) in set_array_info()
6252 mddev->flags = 0; in set_array_info()
6253 set_bit(MD_CHANGE_DEVS, &mddev->flags); in set_array_info()
6255 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in set_array_info()
6256 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in set_array_info()
6257 mddev->bitmap_info.offset = 0; in set_array_info()
6259 mddev->reshape_position = MaxSector; in set_array_info()
6264 get_random_bytes(mddev->uuid, 16); in set_array_info()
6266 mddev->new_level = mddev->level; in set_array_info()
6267 mddev->new_chunk_sectors = mddev->chunk_sectors; in set_array_info()
6268 mddev->new_layout = mddev->layout; in set_array_info()
6269 mddev->delta_disks = 0; in set_array_info()
6270 mddev->reshape_backwards = 0; in set_array_info()
6275 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
6277 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); in md_set_array_sectors()
6279 if (mddev->external_size) in md_set_array_sectors()
6282 mddev->array_sectors = array_sectors; in md_set_array_sectors()
6286 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
6292 if (mddev->pers->resize == NULL) in update_size()
6303 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
6304 mddev->sync_thread) in update_size()
6306 if (mddev->ro) in update_size()
6309 rdev_for_each(rdev, mddev) { in update_size()
6317 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
6319 revalidate_disk(mddev->gendisk); in update_size()
6323 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
6328 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
6330 if (mddev->ro) in update_raid_disks()
6333 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
6335 if (mddev->sync_thread || in update_raid_disks()
6336 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
6337 mddev->reshape_position != MaxSector) in update_raid_disks()
6340 rdev_for_each(rdev, mddev) { in update_raid_disks()
6341 if (mddev->raid_disks < raid_disks && in update_raid_disks()
6344 if (mddev->raid_disks > raid_disks && in update_raid_disks()
6349 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
6350 if (mddev->delta_disks < 0) in update_raid_disks()
6351 mddev->reshape_backwards = 1; in update_raid_disks()
6352 else if (mddev->delta_disks > 0) in update_raid_disks()
6353 mddev->reshape_backwards = 0; in update_raid_disks()
6355 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
6357 mddev->delta_disks = 0; in update_raid_disks()
6358 mddev->reshape_backwards = 0; in update_raid_disks()
6371 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
6378 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
6381 if (mddev->major_version != info->major_version || in update_array_info()
6382 mddev->minor_version != info->minor_version || in update_array_info()
6384 mddev->ctime != info->ctime || in update_array_info()
6385 mddev->level != info->level || in update_array_info()
6387 mddev->persistent != !info->not_persistent || in update_array_info()
6388 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
6394 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
6396 if (mddev->raid_disks != info->raid_disks) in update_array_info()
6398 if (mddev->layout != info->layout) in update_array_info()
6407 if (mddev->layout != info->layout) { in update_array_info()
6412 if (mddev->pers->check_reshape == NULL) in update_array_info()
6415 mddev->new_layout = info->layout; in update_array_info()
6416 rv = mddev->pers->check_reshape(mddev); in update_array_info()
6418 mddev->new_layout = mddev->layout; in update_array_info()
6422 if (mddev_is_clustered(mddev)) in update_array_info()
6423 md_cluster_ops->metadata_update_start(mddev); in update_array_info()
6424 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
6425 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
6427 if (mddev->raid_disks != info->raid_disks) in update_array_info()
6428 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
6431 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
6435 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
6442 if (mddev->bitmap) { in update_array_info()
6446 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
6450 mddev->bitmap_info.offset = in update_array_info()
6451 mddev->bitmap_info.default_offset; in update_array_info()
6452 mddev->bitmap_info.space = in update_array_info()
6453 mddev->bitmap_info.default_space; in update_array_info()
6454 mddev->pers->quiesce(mddev, 1); in update_array_info()
6455 bitmap = bitmap_create(mddev, -1); in update_array_info()
6457 mddev->bitmap = bitmap; in update_array_info()
6458 rv = bitmap_load(mddev); in update_array_info()
6462 bitmap_destroy(mddev); in update_array_info()
6463 mddev->pers->quiesce(mddev, 0); in update_array_info()
6466 if (!mddev->bitmap) { in update_array_info()
6470 if (mddev->bitmap->storage.file) { in update_array_info()
6474 mddev->pers->quiesce(mddev, 1); in update_array_info()
6475 bitmap_destroy(mddev); in update_array_info()
6476 mddev->pers->quiesce(mddev, 0); in update_array_info()
6477 mddev->bitmap_info.offset = 0; in update_array_info()
6480 md_update_sb(mddev, 1); in update_array_info()
6481 if (mddev_is_clustered(mddev)) in update_array_info()
6482 md_cluster_ops->metadata_update_finish(mddev); in update_array_info()
6485 if (mddev_is_clustered(mddev)) in update_array_info()
6486 md_cluster_ops->metadata_update_cancel(mddev); in update_array_info()
6490 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
6495 if (mddev->pers == NULL) in set_disk_faulty()
6499 rdev = find_rdev_rcu(mddev, dev); in set_disk_faulty()
6503 md_error(mddev, rdev); in set_disk_faulty()
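
set_disk_faulty() above, like hot_remove_disk() earlier, identifies the member by its raw device number (md_ioctl() passes the argument through new_decode_dev()); this is the kernel side of mdadm --fail and --remove. A hedged sketch; /dev/md0 and /dev/sdb1 are assumed names:

/* Hedged sketch: mark a member faulty and then remove it, the paths
 * served by set_disk_faulty() above and hot_remove_disk() earlier. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/raid/md_u.h>

int main(void)
{
	struct stat st;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || stat("/dev/sdb1", &st) < 0) {
		perror("open/stat");
		return 1;
	}
	/* Both ioctls take the member's dev_t as the argument. */
	if (ioctl(fd, SET_DISK_FAULTY, (unsigned long)st.st_rdev) < 0)
		perror("SET_DISK_FAULTY");
	if (ioctl(fd, HOT_REMOVE_DISK, (unsigned long)st.st_rdev) < 0)
		perror("HOT_REMOVE_DISK");
	close(fd);
	return 0;
}
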
6519 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
6523 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
6558 struct mddev *mddev = NULL; in md_ioctl() local
6596 mddev = bdev->bd_disk->private_data; in md_ioctl()
6598 if (!mddev) { in md_ioctl()
6606 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
6609 err = get_array_info(mddev, argp); in md_ioctl()
6613 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
6616 err = get_disk_info(mddev, argp); in md_ioctl()
6620 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
6624 err = get_bitmap_file(mddev, argp); in md_ioctl()
6635 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
6637 &mddev->flags), in md_ioctl()
6643 mutex_lock(&mddev->open_mutex); in md_ioctl()
6644 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
6645 mutex_unlock(&mddev->open_mutex); in md_ioctl()
6649 set_bit(MD_STILL_CLOSED, &mddev->flags); in md_ioctl()
6650 mutex_unlock(&mddev->open_mutex); in md_ioctl()
6653 err = mddev_lock(mddev); in md_ioctl()
6669 if (mddev->pers) { in md_ioctl()
6670 err = update_array_info(mddev, &info); in md_ioctl()
6678 if (!list_empty(&mddev->disks)) { in md_ioctl()
6681 mdname(mddev)); in md_ioctl()
6685 if (mddev->raid_disks) { in md_ioctl()
6688 mdname(mddev)); in md_ioctl()
6692 err = set_array_info(mddev, &info); in md_ioctl()
6706 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
6719 err = restart_array(mddev); in md_ioctl()
6723 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
6727 err = md_set_readonly(mddev, bdev); in md_ioctl()
6731 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
6739 if (mddev->pers) { in md_ioctl()
6747 err = add_new_disk(mddev, &info); in md_ioctl()
6766 if (mddev->ro != 1) in md_ioctl()
6772 if (mddev->pers) { in md_ioctl()
6773 err = restart_array(mddev); in md_ioctl()
6775 mddev->ro = 2; in md_ioctl()
6776 set_disk_ro(mddev->gendisk, 0); in md_ioctl()
6786 if (mddev->ro && mddev->pers) { in md_ioctl()
6787 if (mddev->ro == 2) { in md_ioctl()
6788 mddev->ro = 0; in md_ioctl()
6789 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
6790 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
6795 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { in md_ioctl()
6796 mddev_unlock(mddev); in md_ioctl()
6797 wait_event(mddev->sb_wait, in md_ioctl()
6798 !test_bit(MD_CHANGE_DEVS, &mddev->flags) && in md_ioctl()
6799 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in md_ioctl()
6800 mddev_lock_nointr(mddev); in md_ioctl()
6815 err = add_new_disk(mddev, &info); in md_ioctl()
6820 if (mddev_is_clustered(mddev)) in md_ioctl()
6821 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
6827 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
6831 err = do_md_run(mddev); in md_ioctl()
6835 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
6844 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
6846 mddev->hold_active = 0; in md_ioctl()
6847 mddev_unlock(mddev); in md_ioctl()
6877 struct mddev *mddev = mddev_find(bdev->bd_dev); in md_open() local
6880 if (!mddev) in md_open()
6883 if (mddev->gendisk != bdev->bd_disk) { in md_open()
6887 mddev_put(mddev); in md_open()
6893 BUG_ON(mddev != bdev->bd_disk->private_data); in md_open()
6895 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) in md_open()
6899 atomic_inc(&mddev->openers); in md_open()
6900 clear_bit(MD_STILL_CLOSED, &mddev->flags); in md_open()
6901 mutex_unlock(&mddev->open_mutex); in md_open()
6910 struct mddev *mddev = disk->private_data; in md_release() local
6912 BUG_ON(!mddev); in md_release()
6913 atomic_dec(&mddev->openers); in md_release()
6914 mddev_put(mddev); in md_release()
6919 struct mddev *mddev = disk->private_data; in md_media_changed() local
6921 return mddev->changed; in md_media_changed()
6926 struct mddev *mddev = disk->private_data; in md_revalidate() local
6928 mddev->changed = 0; in md_revalidate()
6997 struct mddev *mddev, const char *name) in md_register_thread() argument
7008 thread->mddev = mddev; in md_register_thread()
7012 mdname(thread->mddev), in md_register_thread()
7040 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7045 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
7047 mddev->pers->error_handler(mddev,rdev); in md_error()
7048 if (mddev->degraded) in md_error()
7049 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
7051 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
7052 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
7053 md_wakeup_thread(mddev->thread); in md_error()
7054 if (mddev->event_work.func) in md_error()
7055 queue_work(md_misc_wq, &mddev->event_work); in md_error()
7056 md_new_event_inintr(mddev); in md_error()
7081 static void status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
7089 if (mddev->curr_resync <= 3) in status_resync()
7092 resync = mddev->curr_resync in status_resync()
7093 - atomic_read(&mddev->recovery_active); in status_resync()
7095 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
7096 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
7097 max_sectors = mddev->resync_max_sectors; in status_resync()
7099 max_sectors = mddev->dev_sectors; in status_resync()
7127 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
7129 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
7131 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
7151 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
7153 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) in status_resync()
7154 - mddev->resync_mark_cnt; in status_resync()
7171 struct mddev *mddev; in md_seq_start() local
7182 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
7183 mddev_get(mddev); in md_seq_start()
7185 return mddev; in md_seq_start()
7196 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
7206 tmp = mddev->all_mddevs.next; in md_seq_next()
7208 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); in md_seq_next()
7216 mddev_put(mddev); in md_seq_next()
7223 struct mddev *mddev = v; in md_seq_stop() local
7225 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
7226 mddev_put(mddev); in md_seq_stop()
7231 struct mddev *mddev = v; in md_seq_show() local
7252 spin_lock(&mddev->lock); in md_seq_show()
7253 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
7254 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
7255 mddev->pers ? "" : "in"); in md_seq_show()
7256 if (mddev->pers) { in md_seq_show()
7257 if (mddev->ro==1) in md_seq_show()
7259 if (mddev->ro==2) in md_seq_show()
7261 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
7266 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
7284 if (!list_empty(&mddev->disks)) { in md_seq_show()
7285 if (mddev->pers) in md_seq_show()
7288 mddev->array_sectors / 2); in md_seq_show()
7293 if (mddev->persistent) { in md_seq_show()
7294 if (mddev->major_version != 0 || in md_seq_show()
7295 mddev->minor_version != 90) { in md_seq_show()
7297 mddev->major_version, in md_seq_show()
7298 mddev->minor_version); in md_seq_show()
7300 } else if (mddev->external) in md_seq_show()
7302 mddev->metadata_type); in md_seq_show()
7306 if (mddev->pers) { in md_seq_show()
7307 mddev->pers->status(seq, mddev); in md_seq_show()
7309 if (mddev->pers->sync_request) { in md_seq_show()
7310 if (mddev->curr_resync > 2) { in md_seq_show()
7311 status_resync(seq, mddev); in md_seq_show()
7313 } else if (mddev->curr_resync >= 1) in md_seq_show()
7315 else if (mddev->recovery_cp < MaxSector) in md_seq_show()
7321 bitmap_status(seq, mddev->bitmap); in md_seq_show()
7325 spin_unlock(&mddev->lock); in md_seq_show()
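
md_seq_show() above, with status_resync() supplying the progress line, is what renders /proc/mdstat. A trivial reader:

/* Minimal sketch: dump the /proc/mdstat text produced by md_seq_show()
 * and status_resync() above. */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/mdstat", "r");

	if (!f) {
		perror("/proc/mdstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
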
7420 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
7437 return md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
7440 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
7444 md_cluster_ops->leave(mddev); in md_cluster_stop()
7448 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
7456 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
7492 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
7495 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
7496 wake_up(&mddev->recovery_wait); in md_done_sync()
7498 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
7499 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
7500 md_wakeup_thread(mddev->thread); in md_done_sync()
7511 void md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
7517 BUG_ON(mddev->ro == 1); in md_write_start()
7518 if (mddev->ro == 2) { in md_write_start()
7520 mddev->ro = 0; in md_write_start()
7521 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
7522 md_wakeup_thread(mddev->thread); in md_write_start()
7523 md_wakeup_thread(mddev->sync_thread); in md_write_start()
7526 atomic_inc(&mddev->writes_pending); in md_write_start()
7527 if (mddev->safemode == 1) in md_write_start()
7528 mddev->safemode = 0; in md_write_start()
7529 if (mddev->in_sync) { in md_write_start()
7530 spin_lock(&mddev->lock); in md_write_start()
7531 if (mddev->in_sync) { in md_write_start()
7532 mddev->in_sync = 0; in md_write_start()
7533 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_write_start()
7534 set_bit(MD_CHANGE_PENDING, &mddev->flags); in md_write_start()
7535 md_wakeup_thread(mddev->thread); in md_write_start()
7538 spin_unlock(&mddev->lock); in md_write_start()
7541 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
7542 wait_event(mddev->sb_wait, in md_write_start()
7543 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); in md_write_start()
7547 void md_write_end(struct mddev *mddev) in md_write_end() argument
7549 if (atomic_dec_and_test(&mddev->writes_pending)) { in md_write_end()
7550 if (mddev->safemode == 2) in md_write_end()
7551 md_wakeup_thread(mddev->thread); in md_write_end()
7552 else if (mddev->safemode_delay) in md_write_end()
7553 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); in md_write_end()
7567 int md_allow_write(struct mddev *mddev) in md_allow_write() argument
7569 if (!mddev->pers) in md_allow_write()
7571 if (mddev->ro) in md_allow_write()
7573 if (!mddev->pers->sync_request) in md_allow_write()
7576 spin_lock(&mddev->lock); in md_allow_write()
7577 if (mddev->in_sync) { in md_allow_write()
7578 mddev->in_sync = 0; in md_allow_write()
7579 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_allow_write()
7580 set_bit(MD_CHANGE_PENDING, &mddev->flags); in md_allow_write()
7581 if (mddev->safemode_delay && in md_allow_write()
7582 mddev->safemode == 0) in md_allow_write()
7583 mddev->safemode = 1; in md_allow_write()
7584 spin_unlock(&mddev->lock); in md_allow_write()
7585 if (mddev_is_clustered(mddev)) in md_allow_write()
7586 md_cluster_ops->metadata_update_start(mddev); in md_allow_write()
7587 md_update_sb(mddev, 0); in md_allow_write()
7588 if (mddev_is_clustered(mddev)) in md_allow_write()
7589 md_cluster_ops->metadata_update_finish(mddev); in md_allow_write()
7590 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
7592 spin_unlock(&mddev->lock); in md_allow_write()
7594 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) in md_allow_write()
7606 struct mddev *mddev = thread->mddev; in md_do_sync() local
7607 struct mddev *mddev2; in md_do_sync()
7623 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in md_do_sync()
7625 if (mddev->ro) {/* never try to sync a read-only array */ in md_do_sync()
7626 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
7630 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
7631 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
7634 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
7639 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
7644 mddev->last_sync_action = action ?: desc; in md_do_sync()
7663 mddev->curr_resync = 2; in md_do_sync()
7666 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7669 if (mddev2 == mddev) in md_do_sync()
7671 if (!mddev->parallel_resync in md_do_sync()
7673 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
7675 if (mddev < mddev2 && mddev->curr_resync == 2) { in md_do_sync()
7677 mddev->curr_resync = 1; in md_do_sync()
7680 if (mddev > mddev2 && mddev->curr_resync == 1) in md_do_sync()
7690 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
7691 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
7695 desc, mdname(mddev), mdname(mddev2)); in md_do_sync()
7706 } while (mddev->curr_resync < 2); in md_do_sync()
7709 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
7713 max_sectors = mddev->resync_max_sectors; in md_do_sync()
7714 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
7716 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
7717 j = mddev->resync_min; in md_do_sync()
7718 else if (!mddev->bitmap) in md_do_sync()
7719 j = mddev->recovery_cp; in md_do_sync()
7721 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
7722 max_sectors = mddev->resync_max_sectors; in md_do_sync()
7725 max_sectors = mddev->dev_sectors; in md_do_sync()
7728 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7744 if (mddev->bitmap) { in md_do_sync()
7745 mddev->pers->quiesce(mddev, 1); in md_do_sync()
7746 mddev->pers->quiesce(mddev, 0); in md_do_sync()
7750 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
7752 " %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
7755 speed_max(mddev), desc); in md_do_sync()
7757 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
7765 mddev->resync_mark = mark[last_mark]; in md_do_sync()
7766 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
7775 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
7781 desc, mdname(mddev)); in md_do_sync()
7782 mddev->curr_resync = j; in md_do_sync()
7784 mddev->curr_resync = 3; /* no longer delayed */ in md_do_sync()
7785 mddev->curr_resync_completed = j; in md_do_sync()
7786 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_do_sync()
7787 md_new_event(mddev); in md_do_sync()
7790 if (mddev_is_clustered(mddev)) in md_do_sync()
7791 md_cluster_ops->resync_start(mddev, j, max_sectors); in md_do_sync()
7799 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
7800 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
7801 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
7804 (j - mddev->curr_resync_completed)*2 in md_do_sync()
7805 >= mddev->resync_max - mddev->curr_resync_completed in md_do_sync()
7808 wait_event(mddev->recovery_wait, in md_do_sync()
7809 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
7810 mddev->curr_resync_completed = j; in md_do_sync()
7811 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
7812 j > mddev->recovery_cp) in md_do_sync()
7813 mddev->recovery_cp = j; in md_do_sync()
7815 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_do_sync()
7816 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in md_do_sync()
7819 while (j >= mddev->resync_max && in md_do_sync()
7820 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
7826 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
7827 mddev->resync_max > j in md_do_sync()
7829 &mddev->recovery)); in md_do_sync()
7832 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7835 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
7837 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
7843 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
7846 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7851 mddev->curr_resync = j; in md_do_sync()
7852 if (mddev_is_clustered(mddev)) in md_do_sync()
7853 md_cluster_ops->resync_info_update(mddev, j, max_sectors); in md_do_sync()
7854 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
7859 md_new_event(mddev); in md_do_sync()
7870 mddev->resync_mark = mark[next]; in md_do_sync()
7871 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
7873 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
7877 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7890 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
7891 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
7892 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
7894 if (currspeed > speed_min(mddev)) { in md_do_sync()
7895 if (currspeed > speed_max(mddev)) { in md_do_sync()
7899 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
7904 wait_event(mddev->recovery_wait, in md_do_sync()
7905 !atomic_read(&mddev->recovery_active)); in md_do_sync()
7909 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
7910 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
7916 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
7919 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
7921 if (mddev_is_clustered(mddev)) in md_do_sync()
7922 md_cluster_ops->resync_finish(mddev); in md_do_sync()
7924 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
7925 mddev->curr_resync > 2) { in md_do_sync()
7926 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
7927 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
7928 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
7931 desc, mdname(mddev)); in md_do_sync()
7933 &mddev->recovery)) in md_do_sync()
7934 mddev->recovery_cp = in md_do_sync()
7935 mddev->curr_resync_completed; in md_do_sync()
7937 mddev->recovery_cp = in md_do_sync()
7938 mddev->curr_resync; in md_do_sync()
7941 mddev->recovery_cp = MaxSector; in md_do_sync()
7943 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
7944 mddev->curr_resync = MaxSector; in md_do_sync()
7946 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
7948 mddev->delta_disks >= 0 && in md_do_sync()
7951 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
7952 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
7957 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_do_sync()
7959 spin_lock(&mddev->lock); in md_do_sync()
7960 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
7962 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
7963 mddev->resync_min = 0; in md_do_sync()
7964 mddev->resync_max = MaxSector; in md_do_sync()
7965 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
7966 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
7967 mddev->curr_resync = 0; in md_do_sync()
7968 spin_unlock(&mddev->lock); in md_do_sync()
7971 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
7972 md_wakeup_thread(mddev->thread); in md_do_sync()
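The md_do_sync() fragments above include the rate estimate used to throttle a resync against speed_min()/speed_max(): sectors completed since the last mark, halved into KiB, divided by the elapsed seconds derived from jiffies. The following is a minimal standalone sketch of just that arithmetic; HZ, the mark values and the sample numbers are stand-ins for this example, not values taken from the kernel.
	#include <stdio.h>

	#define HZ 1000UL	/* assumed tick rate for this sketch only */

	static unsigned long resync_speed(unsigned long recovery_done,
					  unsigned long resync_mark_cnt,
					  unsigned long jiffies_now,
					  unsigned long resync_mark)
	{
		/* sectors done since the last mark, halved to get KiB, divided
		 * by elapsed seconds; the two "+1" terms avoid dividing by zero
		 * and reporting a zero speed, as in the listing */
		return ((recovery_done - resync_mark_cnt) / 2)
			/ ((jiffies_now - resync_mark) / HZ + 1) + 1;
	}

	int main(void)
	{
		/* e.g. 409600 sectors (200 MiB) resynced over roughly two seconds */
		printf("%lu KiB/s\n", resync_speed(409600, 0, 2000, 0));
		return 0;
	}
If the result exceeds speed_min() the loop only backs off further when it also exceeds speed_max() or the array is not otherwise idle, which is the comparison chain visible in the fragments above.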
7977 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
7984 rdev_for_each(rdev, mddev) in remove_and_add_spares()
7991 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
7992 mddev, rdev) == 0) { in remove_and_add_spares()
7993 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
7998 if (removed && mddev->kobj.sd) in remove_and_add_spares()
7999 sysfs_notify(&mddev->kobj, NULL, "degraded"); in remove_and_add_spares()
8004 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
8013 if (mddev->ro && in remove_and_add_spares()
8019 if (mddev->pers-> in remove_and_add_spares()
8020 hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
8021 if (sysfs_link_rdev(mddev, rdev)) in remove_and_add_spares()
8024 md_new_event(mddev); in remove_and_add_spares()
8025 set_bit(MD_CHANGE_DEVS, &mddev->flags); in remove_and_add_spares()
8030 set_bit(MD_CHANGE_DEVS, &mddev->flags); in remove_and_add_spares()
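The remove_and_add_spares() fragments show a two-pass shape: a first rdev_for_each() hands failed members to pers->hot_remove_disk() and unlinks them from sysfs (notifying "degraded" if anything was removed), then a second pass offers remaining candidates to pers->hot_add_disk(), links them back, and flags MD_CHANGE_DEVS. A toy userspace model of that shape follows; the struct and helper names are simplified stand-ins, not the kernel definitions.
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_rdev { const char *name; bool faulty; bool in_sync; };

	static int remove_and_add_spares_model(struct toy_rdev *rdevs, int n)
	{
		int spares = 0, removed = 0;

		for (int i = 0; i < n; i++)	/* pass 1: drop failed devices */
			if (rdevs[i].faulty) {
				printf("remove %s\n", rdevs[i].name);
				removed++;
			}
		if (removed)
			printf("notify sysfs: degraded\n");

		for (int i = 0; i < n; i++)	/* pass 2: re-add usable spares */
			if (!rdevs[i].faulty && !rdevs[i].in_sync) {
				printf("hot-add %s\n", rdevs[i].name);
				spares++;
			}
		return spares;			/* caller starts recovery if > 0 */
	}

	int main(void)
	{
		struct toy_rdev r[] = { {"sda1", true, false}, {"sdb1", false, false} };
		printf("spares = %d\n", remove_and_add_spares_model(r, 2));
		return 0;
	}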
8036 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
8038 mddev->sync_thread = md_register_thread(md_do_sync, in md_start_sync()
8039 mddev, in md_start_sync()
8041 if (!mddev->sync_thread) { in md_start_sync()
8044 mdname(mddev)); in md_start_sync()
8046 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
8047 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
8048 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
8049 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
8050 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
8053 &mddev->recovery)) in md_start_sync()
8054 if (mddev->sysfs_action) in md_start_sync()
8055 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
8057 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
8058 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
8059 md_new_event(mddev); in md_start_sync()
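The md_start_sync() fragments run from a workqueue item: register the sync thread, and if md_register_thread() fails, clear every MD_RECOVERY_* intent bit before notifying sysfs_action; on success, wake the thread and raise a new event. A condensed standalone model of the back-out path is below; the bit names mirror the listing, but the mask handling is a stand-in for the kernel's atomic bit operations.
	#include <stdio.h>

	enum { RECOVERY_RUNNING = 1, RECOVERY_SYNC = 2, RECOVERY_RESHAPE = 4,
	       RECOVERY_REQUESTED = 8, RECOVERY_CHECK = 16 };

	static int start_sync_model(unsigned *recovery, int thread_ok)
	{
		if (!thread_ok) {
			/* registration failed: back out all the intent bits */
			*recovery &= ~(RECOVERY_SYNC | RECOVERY_RESHAPE |
				       RECOVERY_REQUESTED | RECOVERY_CHECK |
				       RECOVERY_RUNNING);
			printf("sync thread failed, recovery aborted\n");
			return -1;
		}
		printf("sync thread started\n");
		return 0;
	}

	int main(void)
	{
		unsigned recovery = RECOVERY_RUNNING | RECOVERY_SYNC;
		start_sync_model(&recovery, 0);
		printf("recovery bits now 0x%x\n", recovery);
		return 0;
	}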
8084 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
8086 if (mddev->suspended) in md_check_recovery()
8089 if (mddev->bitmap) in md_check_recovery()
8090 bitmap_daemon_work(mddev); in md_check_recovery()
8093 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
8095 mdname(mddev)); in md_check_recovery()
8096 mddev->safemode = 2; in md_check_recovery()
8101 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
8104 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) || in md_check_recovery()
8105 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
8106 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
8107 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
8108 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) in md_check_recovery()
8109 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
8113 if (mddev_trylock(mddev)) { in md_check_recovery()
8116 if (mddev->ro) { in md_check_recovery()
8124 remove_and_add_spares(mddev, NULL); in md_check_recovery()
8128 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
8129 md_reap_sync_thread(mddev); in md_check_recovery()
8130 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
8134 if (!mddev->external) { in md_check_recovery()
8136 spin_lock(&mddev->lock); in md_check_recovery()
8137 if (mddev->safemode && in md_check_recovery()
8138 !atomic_read(&mddev->writes_pending) && in md_check_recovery()
8139 !mddev->in_sync && in md_check_recovery()
8140 mddev->recovery_cp == MaxSector) { in md_check_recovery()
8141 mddev->in_sync = 1; in md_check_recovery()
8143 set_bit(MD_CHANGE_CLEAN, &mddev->flags); in md_check_recovery()
8145 if (mddev->safemode == 1) in md_check_recovery()
8146 mddev->safemode = 0; in md_check_recovery()
8147 spin_unlock(&mddev->lock); in md_check_recovery()
8149 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_check_recovery()
8152 if (mddev->flags & MD_UPDATE_SB_FLAGS) { in md_check_recovery()
8153 if (mddev_is_clustered(mddev)) in md_check_recovery()
8154 md_cluster_ops->metadata_update_start(mddev); in md_check_recovery()
8155 md_update_sb(mddev, 0); in md_check_recovery()
8156 if (mddev_is_clustered(mddev)) in md_check_recovery()
8157 md_cluster_ops->metadata_update_finish(mddev); in md_check_recovery()
8160 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_check_recovery()
8161 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
8163 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
8166 if (mddev->sync_thread) { in md_check_recovery()
8167 md_reap_sync_thread(mddev); in md_check_recovery()
8173 mddev->curr_resync_completed = 0; in md_check_recovery()
8174 spin_lock(&mddev->lock); in md_check_recovery()
8175 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
8176 spin_unlock(&mddev->lock); in md_check_recovery()
8180 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
8181 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
8183 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
8184 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
8193 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
8194 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
8195 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
8198 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
8199 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8200 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
8201 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
8202 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
8203 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
8204 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8205 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
8206 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
8207 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
8208 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
8212 if (mddev->pers->sync_request) { in md_check_recovery()
8218 bitmap_write_all(mddev->bitmap); in md_check_recovery()
8220 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
8221 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
8225 if (!mddev->sync_thread) { in md_check_recovery()
8226 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
8229 &mddev->recovery)) in md_check_recovery()
8230 if (mddev->sysfs_action) in md_check_recovery()
8231 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
8234 wake_up(&mddev->sb_wait); in md_check_recovery()
8235 mddev_unlock(mddev); in md_check_recovery()
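Taken together, the md_check_recovery() fragments reflect a fixed decision order once mddev_trylock() succeeds: push out pending superblock updates, reap an already-finished sync thread, bail if recovery is not needed or is frozen, then prefer a pending reshape, otherwise recovery onto spares, otherwise a resync when recovery_cp is below MaxSector, before handing off to md_start_sync() through del_work. The small model below mirrors that priority ladder only; plain booleans stand in for the flag and bit tests, and the locking and clustered-metadata calls are omitted.
	#include <stdio.h>
	#include <stdbool.h>

	static const char *pick_action(bool sb_dirty, bool sync_done, bool frozen,
				       bool reshape_pending, int spares,
				       bool resync_needed)
	{
		if (sb_dirty)
			printf("update superblock first\n");
		if (sync_done)
			return "reap finished sync thread";
		if (frozen)
			return "nothing (recovery frozen)";
		if (reshape_pending)
			return "reshape";
		if (spares > 0)
			return "recover onto spares";
		if (resync_needed)
			return "resync";
		return "nothing to do";
	}

	int main(void)
	{
		printf("%s\n", pick_action(true, false, false, false, 1, true));
		return 0;
	}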
8240 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
8245 md_unregister_thread(&mddev->sync_thread); in md_reap_sync_thread()
8246 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
8247 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_reap_sync_thread()
8250 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
8251 sysfs_notify(&mddev->kobj, NULL, in md_reap_sync_thread()
8253 set_bit(MD_CHANGE_DEVS, &mddev->flags); in md_reap_sync_thread()
8256 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
8257 md_cluster_ops->metadata_update_start(mddev); in md_reap_sync_thread()
8258 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
8259 mddev->pers->finish_reshape) in md_reap_sync_thread()
8260 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
8265 if (!mddev->degraded) in md_reap_sync_thread()
8266 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
8269 md_update_sb(mddev, 1); in md_reap_sync_thread()
8270 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
8271 md_cluster_ops->metadata_update_finish(mddev); in md_reap_sync_thread()
8272 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
8273 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
8274 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
8275 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
8276 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
8277 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
8280 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
8281 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
8282 md_new_event(mddev); in md_reap_sync_thread()
8283 if (mddev->event_work.func) in md_reap_sync_thread()
8284 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
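The md_reap_sync_thread() fragments show the tear-down ordering: unregister the sync thread, let pers->spare_active() fold recovered devices back in when the run was neither interrupted nor a requested check, finish a reshape if one was in flight, write the superblock, clear the MD_RECOVERY_* state bits, set MD_RECOVERY_NEEDED again so md_check_recovery() can look for follow-up work, and notify sysfs. A compressed standalone sketch of that ordering follows; each print stands in for the kernel call named in its comment, and the two flags are simplifications of the bit tests above.
	#include <stdio.h>

	static void reap_sync_thread_model(int interrupted, int reshaping)
	{
		printf("unregister sync thread\n");	/* md_unregister_thread() */
		if (!interrupted)
			printf("activate recovered spares\n"); /* pers->spare_active() */
		if (reshaping)
			printf("finish reshape\n");	/* pers->finish_reshape() */
		printf("write superblock\n");		/* md_update_sb(mddev, 1) */
		printf("clear recovery bits, set NEEDED\n");
		printf("notify sysfs_action, new event\n");
	}

	int main(void)
	{
		reap_sync_thread_model(0, 0);
		return 0;
	}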
8288 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
8295 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
8299 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
8304 rdev_for_each(rdev, mddev) { in md_finish_reshape()
8575 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); in rdev_set_badblocks()
8576 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
8815 struct mddev *mddev; in md_notify_reboot() local
8818 for_each_mddev(mddev, tmp) { in md_notify_reboot()
8819 if (mddev_trylock(mddev)) { in md_notify_reboot()
8820 if (mddev->pers) in md_notify_reboot()
8821 __md_stop_writes(mddev); in md_notify_reboot()
8822 if (mddev->persistent) in md_notify_reboot()
8823 mddev->safemode = 2; in md_notify_reboot()
8824 mddev_unlock(mddev); in md_notify_reboot()
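The md_notify_reboot() fragments walk every array with for_each_mddev() and, for each one it can trylock, stop writes and push persistent arrays into safemode 2 so a reboot does not leave them marked dirty. Below is a toy model of that loop; the struct and field names are simplified stand-ins for the kernel's mddev, and the trylock step is left out.
	#include <stdio.h>
	#include <stdbool.h>

	struct toy_mddev { const char *name; bool active; bool persistent; int safemode; };

	static void notify_reboot_model(struct toy_mddev *arrays, int n)
	{
		for (int i = 0; i < n; i++) {
			if (arrays[i].active)
				printf("%s: stop writes\n", arrays[i].name);
			if (arrays[i].persistent)
				arrays[i].safemode = 2;	/* flush on next idle */
		}
	}

	int main(void)
	{
		struct toy_mddev a[] = { {"md0", true, true, 0}, {"md1", false, true, 0} };
		notify_reboot_model(a, 2);
		printf("md0 safemode = %d\n", a[0].safemode);
		return 0;
	}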
8893 void md_reload_sb(struct mddev *mddev) in md_reload_sb() argument
8897 rdev_for_each_safe(rdev, tmp, mddev) { in md_reload_sb()
8901 mddev->raid_disks = 0; in md_reload_sb()
8902 analyze_sbs(mddev); in md_reload_sb()
8903 rdev_for_each_safe(rdev, tmp, mddev) { in md_reload_sb()
8908 if (mddev->events > sb->events) in md_reload_sb()
8983 struct mddev *mddev; in md_exit() local
9007 for_each_mddev(mddev, tmp) { in md_exit()
9008 export_array(mddev); in md_exit()
9009 mddev->hold_active = 0; in md_exit()