Lines matching refs: ms — every hit for the identifier ms in the dm-raid1 mirror target, listed as source line number, matching code, and enclosing function.
44 struct mirror_set *ms; member
92 struct mirror_set *ms = context; in wakeup_mirrord() local
94 queue_work(ms->kmirrord_wq, &ms->kmirrord_work); in wakeup_mirrord()
99 struct mirror_set *ms = (struct mirror_set *) data; in delayed_wake_fn() local
101 clear_bit(0, &ms->timer_pending); in delayed_wake_fn()
102 wakeup_mirrord(ms); in delayed_wake_fn()
105 static void delayed_wake(struct mirror_set *ms) in delayed_wake() argument
107 if (test_and_set_bit(0, &ms->timer_pending)) in delayed_wake()
110 ms->timer.expires = jiffies + HZ / 5; in delayed_wake()
111 ms->timer.data = (unsigned long) ms; in delayed_wake()
112 ms->timer.function = delayed_wake_fn; in delayed_wake()
113 add_timer(&ms->timer); in delayed_wake()
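The delayed-wake lines above show the pre-timer_setup() timer idiom: bit 0 of timer_pending serializes arming, and the callback clears it before kicking the workqueue, so at most one coalescing timer (~200ms) is ever outstanding. A minimal self-contained sketch of the same pattern; the demo_* names are hypothetical, and the .data/.function fields assume the old timer API this file is using:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct demo_set {
    unsigned long timer_pending;        /* bit 0: a timer is armed */
    struct timer_list timer;
    struct workqueue_struct *wq;
    struct work_struct work;
};

static void demo_delayed_wake_fn(unsigned long data)
{
    struct demo_set *ds = (struct demo_set *)data;

    clear_bit(0, &ds->timer_pending);   /* allow the next re-arm */
    queue_work(ds->wq, &ds->work);
}

static void demo_delayed_wake(struct demo_set *ds)
{
    if (test_and_set_bit(0, &ds->timer_pending))
        return;                         /* already armed */

    ds->timer.expires = jiffies + HZ / 5;   /* ~200 ms */
    ds->timer.data = (unsigned long)ds;
    ds->timer.function = demo_delayed_wake_fn;
    add_timer(&ds->timer);
}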
121 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) in queue_bio() argument
127 bl = (rw == WRITE) ? &ms->writes : &ms->reads; in queue_bio()
128 spin_lock_irqsave(&ms->lock, flags); in queue_bio()
131 spin_unlock_irqrestore(&ms->lock, flags); in queue_bio()
134 wakeup_mirrord(ms); in queue_bio()
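queue_bio() is the producer half of the daemon: the bio goes onto the reads or writes list under the set's spinlock, and the daemon is woken only when the list transitions from empty to non-empty. A sketch of that pattern with a hypothetical context (demo_*); the wake callback parameter stands in for wakeup_mirrord():

#include <linux/bio.h>
#include <linux/spinlock.h>

struct demo_ctx {
    spinlock_t lock;
    struct bio_list reads;
    struct bio_list writes;
};

static void demo_queue_bio(struct demo_ctx *c, struct bio *bio, int rw,
                           void (*wake)(struct demo_ctx *))
{
    struct bio_list *bl = (rw == WRITE) ? &c->writes : &c->reads;
    unsigned long flags;
    int should_wake;

    spin_lock_irqsave(&c->lock, flags);
    should_wake = !bl->head;            /* list was empty: daemon may be idle */
    bio_list_add(bl, bio);
    spin_unlock_irqrestore(&c->lock, flags);

    if (should_wake)
        wake(c);
}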
139 struct mirror_set *ms = context; in dispatch_bios() local
143 queue_bio(ms, bio, WRITE); in dispatch_bios()
173 static struct mirror *get_default_mirror(struct mirror_set *ms) in get_default_mirror() argument
175 return &ms->mirror[atomic_read(&ms->default_mirror)]; in get_default_mirror()
180 struct mirror_set *ms = m->ms; in set_default_mirror() local
181 struct mirror *m0 = &(ms->mirror[0]); in set_default_mirror()
183 atomic_set(&ms->default_mirror, m - m0); in set_default_mirror()
186 static struct mirror *get_valid_mirror(struct mirror_set *ms) in get_valid_mirror() argument
190 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) in get_valid_mirror()
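The default mirror is stored as an atomic array index rather than a pointer, so it can be read and replaced without locking; set_default_mirror() recovers the index by pointer arithmetic against &mirror[0], and get_valid_mirror() scans for any leg with a clean error count. A sketch with hypothetical demo_* names:

#include <linux/atomic.h>

struct demo_leg {
    atomic_t error_count;
};

struct demo_set {
    atomic_t default_leg;               /* index, not pointer */
    unsigned nr_legs;
    struct demo_leg leg[8];
};

static struct demo_leg *demo_get_default(struct demo_set *ds)
{
    return &ds->leg[atomic_read(&ds->default_leg)];
}

static void demo_set_default(struct demo_set *ds, struct demo_leg *l)
{
    /* pointer difference back to an array index */
    atomic_set(&ds->default_leg, l - &ds->leg[0]);
}

static struct demo_leg *demo_get_valid(struct demo_set *ds)
{
    struct demo_leg *l;

    for (l = ds->leg; l < ds->leg + ds->nr_legs; l++)
        if (!atomic_read(&l->error_count))
            return l;
    return NULL;
}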
213 struct mirror_set *ms = m->ms; in fail_mirror() local
216 ms->leg_failure = 1; in fail_mirror()
228 if (!errors_handled(ms)) in fail_mirror()
231 if (m != get_default_mirror(ms)) in fail_mirror()
234 if (!ms->in_sync && !keep_log(ms)) { in fail_mirror()
244 new = get_valid_mirror(ms); in fail_mirror()
251 schedule_work(&ms->trigger_event); in fail_mirror()
256 struct mirror_set *ms = ti->private; in mirror_flush() local
260 struct dm_io_region io[ms->nr_mirrors]; in mirror_flush()
266 .client = ms->io_client, in mirror_flush()
269 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) { in mirror_flush()
276 dm_io(&io_req, ms->nr_mirrors, io, &error_bits); in mirror_flush()
278 for (i = 0; i < ms->nr_mirrors; i++) in mirror_flush()
280 fail_mirror(ms->mirror + i, in mirror_flush()
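mirror_flush() issues one zero-length flush per leg through dm-io, synchronously (no notify fn), then converts failed bits in error_bits back into per-leg failures. A sketch against the dm-io API of the kernel generation this listing comes from; the WRITE_FLUSH request flag and the exact dm_io_request fields changed names in later kernels, so treat those as era-specific assumptions:

#include <linux/dm-io.h>
#include <linux/bitops.h>

static int demo_flush_all_legs(struct mirror_set *ms)
{
    unsigned long error_bits = 0;
    unsigned i;
    struct dm_io_region io[ms->nr_mirrors];
    struct dm_io_request io_req = {
        .bi_rw = WRITE_FLUSH,           /* era-specific flush flag */
        .mem.type = DM_IO_KMEM,
        .mem.ptr.addr = NULL,           /* no payload, pure flush */
        .client = ms->io_client,
    };

    for (i = 0; i < ms->nr_mirrors; i++) {
        io[i].bdev = ms->mirror[i].dev->bdev;
        io[i].sector = 0;
        io[i].count = 0;                /* zero length: flush only */
    }

    /* No notify.fn, so dm_io() blocks and fills error_bits. */
    dm_io(&io_req, ms->nr_mirrors, io, &error_bits);

    for (i = 0; i < ms->nr_mirrors; i++)
        if (test_bit(i, &error_bits))
            fail_mirror(ms->mirror + i, DM_RAID1_FLUSH_ERROR);

    return error_bits ? -EIO : 0;
}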
299 struct mirror_set *ms = dm_rh_region_context(reg); in recovery_complete() local
305 fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR); in recovery_complete()
315 for (m = 0; m < ms->nr_mirrors; m++) { in recovery_complete()
316 if (&ms->mirror[m] == get_default_mirror(ms)) in recovery_complete()
319 fail_mirror(ms->mirror + m, in recovery_complete()
328 static int recover(struct mirror_set *ms, struct dm_region *reg) in recover() argument
336 sector_t region_size = dm_rh_get_region_size(ms->rh); in recover()
339 m = get_default_mirror(ms); in recover()
341 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
342 if (key == (ms->nr_regions - 1)) { in recover()
347 from.count = ms->ti->len & (region_size - 1); in recover()
354 for (i = 0, dest = to; i < ms->nr_mirrors; i++) { in recover()
355 if (&ms->mirror[i] == get_default_mirror(ms)) in recover()
358 m = ms->mirror + i; in recover()
360 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
366 if (!errors_handled(ms)) in recover()
369 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, in recover()
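recover() builds one kcopyd job per dirty region: the source is the default leg, the destinations are every other leg, and the final region may be shorter than region_size (the mask trick at 347 relies on region sizes being powers of two, which dm requires). A sketch with the region key and completion callback passed in so it stands alone; DM_KCOPYD_MAX_REGIONS and DM_KCOPYD_IGNORE_ERROR are the real dm-kcopyd constants:

#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

static int demo_recover_region(struct mirror_set *ms, region_t key,
                               dm_kcopyd_notify_fn done, void *context)
{
    struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest = to;
    sector_t region_size = dm_rh_get_region_size(ms->rh);
    struct mirror *m = get_default_mirror(ms);
    unsigned long flags = 0;
    unsigned i;

    /* Source: the default leg. */
    from.bdev = m->dev->bdev;
    from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
    if (key == ms->nr_regions - 1) {
        /* The final region may be truncated. */
        from.count = ms->ti->len & (region_size - 1);
        if (!from.count)
            from.count = region_size;
    } else
        from.count = region_size;

    /* Destinations: every leg except the source. */
    for (i = 0; i < ms->nr_mirrors; i++) {
        if (&ms->mirror[i] == get_default_mirror(ms))
            continue;
        m = ms->mirror + i;
        dest->bdev = m->dev->bdev;
        dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
        dest->count = from.count;
        dest++;
    }

    if (!errors_handled(ms))
        set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

    return dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1,
                          to, flags, done, context);
}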
375 static void reset_ms_flags(struct mirror_set *ms) in reset_ms_flags() argument
379 ms->leg_failure = 0; in reset_ms_flags()
380 for (m = 0; m < ms->nr_mirrors; m++) { in reset_ms_flags()
381 atomic_set(&(ms->mirror[m].error_count), 0); in reset_ms_flags()
382 ms->mirror[m].error_type = 0; in reset_ms_flags()
386 static void do_recovery(struct mirror_set *ms) in do_recovery() argument
389 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_recovery()
395 dm_rh_recovery_prepare(ms->rh); in do_recovery()
400 while ((reg = dm_rh_recovery_start(ms->rh))) { in do_recovery()
401 r = recover(ms, reg); in do_recovery()
409 if (!ms->in_sync && in do_recovery()
410 (log->type->get_sync_count(log) == ms->nr_regions)) { in do_recovery()
412 dm_table_event(ms->ti->table); in do_recovery()
413 ms->in_sync = 1; in do_recovery()
414 reset_ms_flags(ms); in do_recovery()
421 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) in choose_mirror() argument
423 struct mirror *m = get_default_mirror(ms); in choose_mirror()
429 if (m-- == ms->mirror) in choose_mirror()
430 m += ms->nr_mirrors; in choose_mirror()
431 } while (m != get_default_mirror(ms)); in choose_mirror()
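choose_mirror() walks backwards from the default leg, wrapping from &mirror[0] to the last leg, until it finds one with a zero error count; the sector argument is unused in this version. Reconstructed around the wrap test visible at 429-431 (the error-count check is the same one get_valid_mirror() uses):

static struct mirror *demo_choose(struct mirror_set *ms)
{
    struct mirror *m = get_default_mirror(ms);

    do {
        if (likely(!atomic_read(&m->error_count)))
            return m;                   /* healthy leg */

        if (m-- == ms->mirror)          /* walked off the front... */
            m += ms->nr_mirrors;        /* ...wrap to the last leg */
    } while (m != get_default_mirror(ms));

    return NULL;                        /* every leg has errors */
}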
438 struct mirror *default_mirror = get_default_mirror(m->ms); in default_ok()
443 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument
445 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_available()
446 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
449 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
461 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
478 static void hold_bio(struct mirror_set *ms, struct bio *bio) in hold_bio() argument
484 spin_lock_irq(&ms->lock); in hold_bio()
486 if (atomic_read(&ms->suspend)) { in hold_bio()
487 spin_unlock_irq(&ms->lock); in hold_bio()
492 if (dm_noflush_suspending(ms->ti)) in hold_bio()
504 bio_list_add(&ms->holds, bio); in hold_bio()
505 spin_unlock_irq(&ms->lock); in hold_bio()
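hold_bio() parks bios on ms->holds while the target is being suspended; if the set is already suspended it is too late to hold, so the bio completes immediately, and under a no-flush suspend it must be requeued rather than failed. A sketch assuming the bi_error-era bio API that matches the rest of this listing:

static void demo_hold_bio(struct mirror_set *ms, struct bio *bio)
{
    spin_lock_irq(&ms->lock);

    if (atomic_read(&ms->suspend)) {
        spin_unlock_irq(&ms->lock);

        /* Too late to hold: complete it now. */
        if (dm_noflush_suspending(ms->ti))
            bio->bi_error = DM_ENDIO_REQUEUE;
        else
            bio->bi_error = -EIO;
        bio_endio(bio);
        return;
    }

    bio_list_add(&ms->holds, bio);      /* replayed on resume */
    spin_unlock_irq(&ms->lock);
}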
526 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { in read_callback()
530 queue_bio(m->ms, bio, bio_rw(bio)); in read_callback()
549 .client = m->ms->io_client, in read_async_bio()
557 static inline int region_in_sync(struct mirror_set *ms, region_t region, in region_in_sync() argument
560 int state = dm_rh_get_state(ms->rh, region, may_block); in region_in_sync()
564 static void do_reads(struct mirror_set *ms, struct bio_list *reads) in do_reads() argument
571 region = dm_rh_bio_to_region(ms->rh, bio); in do_reads()
572 m = get_default_mirror(ms); in do_reads()
577 if (likely(region_in_sync(ms, region, 1))) in do_reads()
578 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
605 struct mirror_set *ms; in write_callback() local
609 ms = bio_get_m(bio)->ms; in write_callback()
633 for (i = 0; i < ms->nr_mirrors; i++) in write_callback()
635 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); in write_callback()
642 spin_lock_irqsave(&ms->lock, flags); in write_callback()
643 if (!ms->failures.head) in write_callback()
645 bio_list_add(&ms->failures, bio); in write_callback()
646 spin_unlock_irqrestore(&ms->lock, flags); in write_callback()
648 wakeup_mirrord(ms); in write_callback()
651 static void do_write(struct mirror_set *ms, struct bio *bio) in do_write() argument
654 struct dm_io_region io[ms->nr_mirrors], *dest = io; in do_write()
662 .client = ms->io_client, in do_write()
671 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) in do_write()
678 bio_set_m(bio, get_default_mirror(ms)); in do_write()
680 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); in do_write()
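do_write() fans a single write out to every leg with one asynchronous dm_io() call (notify.fn is set, so dm_io() returns immediately), after stashing the default mirror on the bio with bio_set_m() so write_callback() can find the set again. A sketch; the bi_rw flag handling is era-specific, and discard handling is elided:

#include <linux/dm-io.h>

static void demo_do_write(struct mirror_set *ms, struct bio *bio,
                          io_notify_fn write_cb)
{
    unsigned i;
    struct dm_io_region io[ms->nr_mirrors], *dest = io;
    struct mirror *m;
    struct dm_io_request io_req = {
        .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
        .mem.type = DM_IO_BIO,          /* data comes from the bio */
        .mem.ptr.bio = bio,
        .notify.fn = write_cb,          /* async completion */
        .notify.context = bio,
        .client = ms->io_client,
    };

    for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
        dest->bdev = m->dev->bdev;
        dest->sector = map_sector(m, bio);
        dest->count = bio_sectors(bio);
        dest++;
    }

    /* Let the callback recover ms through the bio. */
    bio_set_m(bio, get_default_mirror(ms));

    BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}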
683 static void do_writes(struct mirror_set *ms, struct bio_list *writes) in do_writes() argument
689 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_writes()
710 region = dm_rh_bio_to_region(ms->rh, bio); in do_writes()
718 state = dm_rh_get_state(ms->rh, region, 1); in do_writes()
742 spin_lock_irq(&ms->lock); in do_writes()
743 bio_list_merge(&ms->writes, &requeue); in do_writes()
744 spin_unlock_irq(&ms->lock); in do_writes()
745 delayed_wake(ms); in do_writes()
753 dm_rh_inc_pending(ms->rh, &sync); in do_writes()
754 dm_rh_inc_pending(ms->rh, &nosync); in do_writes()
761 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure; in do_writes()
766 if (unlikely(ms->log_failure) && errors_handled(ms)) { in do_writes()
767 spin_lock_irq(&ms->lock); in do_writes()
768 bio_list_merge(&ms->failures, &sync); in do_writes()
769 spin_unlock_irq(&ms->lock); in do_writes()
770 wakeup_mirrord(ms); in do_writes()
773 do_write(ms, bio); in do_writes()
776 dm_rh_delay(ms->rh, bio); in do_writes()
779 if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) { in do_writes()
780 spin_lock_irq(&ms->lock); in do_writes()
781 bio_list_add(&ms->failures, bio); in do_writes()
782 spin_unlock_irq(&ms->lock); in do_writes()
783 wakeup_mirrord(ms); in do_writes()
785 map_bio(get_default_mirror(ms), bio); in do_writes()
791 static void do_failures(struct mirror_set *ms, struct bio_list *failures) in do_failures() argument
816 if (!ms->log_failure) { in do_failures()
817 ms->in_sync = 0; in do_failures()
818 dm_rh_mark_nosync(ms->rh, bio); in do_failures()
834 if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure))) in do_failures()
836 else if (errors_handled(ms) && !keep_log(ms)) in do_failures()
837 hold_bio(ms, bio); in do_failures()
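do_failures() makes a three-way decision per failed bio, and the checks at 834-837 encode the policy: with no usable leg left (or keep_log with a dead log) the bio must fail; with errors handled and no kept log it is parked via hold_bio() until userspace repairs the set; otherwise it completes successfully off the surviving legs. A condensed sketch of that decision; the final bio_endio() branch is an assumption inferred from the surrounding logic, not shown in the listing:

static void demo_handle_failure(struct mirror_set *ms, struct bio *bio)
{
    if (!ms->log_failure) {
        ms->in_sync = 0;
        dm_rh_mark_nosync(ms->rh, bio); /* force a later resync */
    }

    if (unlikely(!get_valid_mirror(ms) ||
                 (keep_log(ms) && ms->log_failure)))
        bio_io_error(bio);              /* nowhere left to go */
    else if (errors_handled(ms) && !keep_log(ms))
        hold_bio(ms, bio);              /* park until repair */
    else
        bio_endio(bio);                 /* assumed: surviving legs wrote it */
}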
845 struct mirror_set *ms = in trigger_event() local
848 dm_table_event(ms->ti->table); in trigger_event()
856 struct mirror_set *ms = container_of(work, struct mirror_set, in do_mirror() local
861 spin_lock_irqsave(&ms->lock, flags); in do_mirror()
862 reads = ms->reads; in do_mirror()
863 writes = ms->writes; in do_mirror()
864 failures = ms->failures; in do_mirror()
865 bio_list_init(&ms->reads); in do_mirror()
866 bio_list_init(&ms->writes); in do_mirror()
867 bio_list_init(&ms->failures); in do_mirror()
868 spin_unlock_irqrestore(&ms->lock, flags); in do_mirror()
870 dm_rh_update_states(ms->rh, errors_handled(ms)); in do_mirror()
871 do_recovery(ms); in do_mirror()
872 do_reads(ms, &reads); in do_mirror()
873 do_writes(ms, &writes); in do_mirror()
874 do_failures(ms, &failures); in do_mirror()
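do_mirror() is the consumer half: it snapshots all three bio lists with struct copies under one lock acquisition, leaves freshly initialized lists behind, and only then processes the snapshots with the lock dropped. The same pattern, minus the region-hash bookkeeping at 870-871:

static void demo_worker(struct work_struct *work)
{
    struct mirror_set *ms = container_of(work, struct mirror_set,
                                         kmirrord_work);
    struct bio_list reads, writes, failures;
    unsigned long flags;

    spin_lock_irqsave(&ms->lock, flags);
    reads = ms->reads;                  /* struct copy takes the list */
    writes = ms->writes;
    failures = ms->failures;
    bio_list_init(&ms->reads);          /* leave empty lists behind */
    bio_list_init(&ms->writes);
    bio_list_init(&ms->failures);
    spin_unlock_irqrestore(&ms->lock, flags);

    do_reads(ms, &reads);               /* lock no longer held */
    do_writes(ms, &writes);
    do_failures(ms, &failures);
}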
886 struct mirror_set *ms = NULL; in alloc_context() local
888 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); in alloc_context()
890 ms = kzalloc(len, GFP_KERNEL); in alloc_context()
891 if (!ms) { in alloc_context()
896 spin_lock_init(&ms->lock); in alloc_context()
897 bio_list_init(&ms->reads); in alloc_context()
898 bio_list_init(&ms->writes); in alloc_context()
899 bio_list_init(&ms->failures); in alloc_context()
900 bio_list_init(&ms->holds); in alloc_context()
902 ms->ti = ti; in alloc_context()
903 ms->nr_mirrors = nr_mirrors; in alloc_context()
904 ms->nr_regions = dm_sector_div_up(ti->len, region_size); in alloc_context()
905 ms->in_sync = 0; in alloc_context()
906 ms->log_failure = 0; in alloc_context()
907 ms->leg_failure = 0; in alloc_context()
908 atomic_set(&ms->suspend, 0); in alloc_context()
909 atomic_set(&ms->default_mirror, DEFAULT_MIRROR); in alloc_context()
911 ms->io_client = dm_io_client_create(); in alloc_context()
912 if (IS_ERR(ms->io_client)) { in alloc_context()
914 kfree(ms); in alloc_context()
918 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, in alloc_context()
920 ms->ti->begin, MAX_RECOVERY, in alloc_context()
921 dl, region_size, ms->nr_regions); in alloc_context()
922 if (IS_ERR(ms->rh)) { in alloc_context()
924 dm_io_client_destroy(ms->io_client); in alloc_context()
925 kfree(ms); in alloc_context()
929 return ms; in alloc_context()
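alloc_context() sizes one kzalloc() to cover the set plus an nr_mirrors-long trailing mirror array, so the whole context frees with a single kfree(). The idiom in isolation (demo_* hypothetical; a modern kernel would compute len with struct_size() to guard against overflow):

#include <linux/types.h>
#include <linux/slab.h>

struct demo_leg {
    sector_t offset;
};

struct demo_set {
    unsigned nr_legs;
    /* ... fixed fields ... */
    struct demo_leg leg[0];             /* trailing flexible array */
};

static struct demo_set *demo_alloc(unsigned nr_legs)
{
    size_t len = sizeof(struct demo_set) +
                 nr_legs * sizeof(struct demo_leg);
    struct demo_set *ds = kzalloc(len, GFP_KERNEL);

    if (ds)
        ds->nr_legs = nr_legs;
    return ds;                          /* one kfree() releases it all */
}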
932 static void free_context(struct mirror_set *ms, struct dm_target *ti, in free_context() argument
936 dm_put_device(ti, ms->mirror[m].dev); in free_context()
938 dm_io_client_destroy(ms->io_client); in free_context()
939 dm_region_hash_destroy(ms->rh); in free_context()
940 kfree(ms); in free_context()
943 static int get_mirror(struct mirror_set *ms, struct dm_target *ti, in get_mirror() argument
956 &ms->mirror[mirror].dev); in get_mirror()
962 ms->mirror[mirror].ms = ms; in get_mirror()
963 atomic_set(&(ms->mirror[mirror].error_count), 0); in get_mirror()
964 ms->mirror[mirror].error_type = 0; in get_mirror()
965 ms->mirror[mirror].offset = offset; in get_mirror()
1008 static int parse_features(struct mirror_set *ms, unsigned argc, char **argv, in parse_features() argument
1012 struct dm_target *ti = ms->ti; in parse_features()
1037 ms->features |= DM_RAID1_HANDLE_ERRORS; in parse_features()
1039 ms->features |= DM_RAID1_KEEP_LOG; in parse_features()
1049 if (!errors_handled(ms) && keep_log(ms)) { in parse_features()
1073 struct mirror_set *ms; in mirror_ctr() local
1099 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); in mirror_ctr()
1100 if (!ms) { in mirror_ctr()
1107 r = get_mirror(ms, ti, m, argv); in mirror_ctr()
1109 free_context(ms, ti, m); in mirror_ctr()
1116 ti->private = ms; in mirror_ctr()
1118 r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh)); in mirror_ctr()
1127 ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); in mirror_ctr()
1128 if (!ms->kmirrord_wq) { in mirror_ctr()
1133 INIT_WORK(&ms->kmirrord_work, do_mirror); in mirror_ctr()
1134 init_timer(&ms->timer); in mirror_ctr()
1135 ms->timer_pending = 0; in mirror_ctr()
1136 INIT_WORK(&ms->trigger_event, trigger_event); in mirror_ctr()
1138 r = parse_features(ms, argc, argv, &args_used); in mirror_ctr()
1160 ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); in mirror_ctr()
1161 if (IS_ERR(ms->kcopyd_client)) { in mirror_ctr()
1162 r = PTR_ERR(ms->kcopyd_client); in mirror_ctr()
1166 wakeup_mirrord(ms); in mirror_ctr()
1170 destroy_workqueue(ms->kmirrord_wq); in mirror_ctr()
1172 free_context(ms, ti, ms->nr_mirrors); in mirror_ctr()
1178 struct mirror_set *ms = (struct mirror_set *) ti->private; in mirror_dtr() local
1180 del_timer_sync(&ms->timer); in mirror_dtr()
1181 flush_workqueue(ms->kmirrord_wq); in mirror_dtr()
1182 flush_work(&ms->trigger_event); in mirror_dtr()
1183 dm_kcopyd_client_destroy(ms->kcopyd_client); in mirror_dtr()
1184 destroy_workqueue(ms->kmirrord_wq); in mirror_dtr()
1185 free_context(ms, ti, ms->nr_mirrors); in mirror_dtr()
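The destructor ordering above matters: the timer is killed first so nothing can re-queue kmirrord work, the daemon workqueue and the event work are drained next, and only then are the kcopyd client, the workqueue, and the context torn down. The same sequence with the reasoning as comments:

static void demo_dtr(struct dm_target *ti)
{
    struct mirror_set *ms = ti->private;

    del_timer_sync(&ms->timer);         /* no more delayed wakeups */
    flush_workqueue(ms->kmirrord_wq);   /* drain queued daemon work */
    flush_work(&ms->trigger_event);     /* and any pending table event */
    dm_kcopyd_client_destroy(ms->kcopyd_client);
    destroy_workqueue(ms->kmirrord_wq);
    free_context(ms, ti, ms->nr_mirrors);
}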
1195 struct mirror_set *ms = ti->private; in mirror_map() local
1196 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_map()
1204 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); in mirror_map()
1205 queue_bio(ms, bio, rw); in mirror_map()
1209 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); in mirror_map()
1220 queue_bio(ms, bio, rw); in mirror_map()
1228 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
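The read side of mirror_map() asks the dirty log for sync state without blocking (the 0 "may_block" argument at 1209): a would-block or out-of-sync answer defers the bio to the daemon, an in-sync answer maps it straight to a chosen leg. A condensed sketch of that decision; the real function also handles writes and readahead, which are elided here:

static int demo_map_read(struct mirror_set *ms, struct bio *bio)
{
    struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
    region_t region = dm_rh_bio_to_region(ms->rh, bio);
    struct mirror *m;
    int r;

    r = log->type->in_sync(log, region, 0);     /* never sleeps */
    if (r < 0 && r != -EWOULDBLOCK)
        return r;

    if (r <= 0) {               /* out of sync, or the answer would block */
        queue_bio(ms, bio, READ);
        return DM_MAPIO_SUBMITTED;
    }

    m = choose_mirror(ms, bio->bi_iter.bi_sector);
    if (unlikely(!m))
        return -EIO;

    map_bio(m, bio);
    return DM_MAPIO_REMAPPED;
}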
1243 struct mirror_set *ms = (struct mirror_set *) ti->private; in mirror_end_io() local
1254 dm_rh_dec(ms->rh, bio_record->write_region); in mirror_end_io()
1286 if (default_ok(m) || mirror_available(ms, bio)) { in mirror_end_io()
1292 queue_bio(ms, bio, rw); in mirror_end_io()
1306 struct mirror_set *ms = (struct mirror_set *) ti->private; in mirror_presuspend() local
1307 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_presuspend()
1312 atomic_set(&ms->suspend, 1); in mirror_presuspend()
1320 spin_lock_irq(&ms->lock); in mirror_presuspend()
1321 holds = ms->holds; in mirror_presuspend()
1322 bio_list_init(&ms->holds); in mirror_presuspend()
1323 spin_unlock_irq(&ms->lock); in mirror_presuspend()
1326 hold_bio(ms, bio); in mirror_presuspend()
1332 dm_rh_stop_recovery(ms->rh); in mirror_presuspend()
1335 !dm_rh_recovery_in_flight(ms->rh)); in mirror_presuspend()
1347 flush_workqueue(ms->kmirrord_wq); in mirror_presuspend()
1352 struct mirror_set *ms = ti->private; in mirror_postsuspend() local
1353 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_postsuspend()
1362 struct mirror_set *ms = ti->private; in mirror_resume() local
1363 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_resume()
1365 atomic_set(&ms->suspend, 0); in mirror_resume()
1369 dm_rh_start_recovery(ms->rh); in mirror_resume()
1402 struct mirror_set *ms = (struct mirror_set *) ti->private; in mirror_status() local
1403 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_status()
1404 char buffer[ms->nr_mirrors + 1]; in mirror_status()
1408 DMEMIT("%d ", ms->nr_mirrors); in mirror_status()
1409 for (m = 0; m < ms->nr_mirrors; m++) { in mirror_status()
1410 DMEMIT("%s ", ms->mirror[m].dev->name); in mirror_status()
1411 buffer[m] = device_status_char(&(ms->mirror[m])); in mirror_status()
1417 (unsigned long long)ms->nr_regions, buffer); in mirror_status()
1426 DMEMIT("%d", ms->nr_mirrors); in mirror_status()
1427 for (m = 0; m < ms->nr_mirrors; m++) in mirror_status()
1428 DMEMIT(" %s %llu", ms->mirror[m].dev->name, in mirror_status()
1429 (unsigned long long)ms->mirror[m].offset); in mirror_status()
1431 num_feature_args += !!errors_handled(ms); in mirror_status()
1432 num_feature_args += !!keep_log(ms); in mirror_status()
1435 if (errors_handled(ms)) in mirror_status()
1437 if (keep_log(ms)) in mirror_status()
1448 struct mirror_set *ms = ti->private; in mirror_iterate_devices() local
1452 for (i = 0; !ret && i < ms->nr_mirrors; i++) in mirror_iterate_devices()
1453 ret = fn(ti, ms->mirror[i].dev, in mirror_iterate_devices()
1454 ms->mirror[i].offset, ti->len, data); in mirror_iterate_devices()