Lines matching refs: md (struct raid_set member in drivers/md/dm-raid.c)
69 struct mddev md; member
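
Every "rs->md" access in the references below goes through the member declared at line 69: struct raid_set embeds a full struct mddev by value, so &rs->md can be handed straight to MD core helpers such as md_run() and mddev_suspend(). A minimal sketch of that relationship, assuming the dm-raid.c layout of this era (field list abbreviated and approximate, not the verbatim definition):

	/* Sketch only: field list abbreviated; assumes <linux/device-mapper.h>
	 * and the MD core definitions from drivers/md/md.h. */
	struct raid_set {
		struct dm_target *ti;		/* owning device-mapper target */

		struct mddev md;		/* embedded MD array state ("md" at line 69) */
		struct raid_type *raid_type;	/* RAID level/algorithm descriptor */

		struct raid_dev dev[0];		/* per-member devices, sized at allocation */
	};
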
165 mddev_init(&rs->md); in context_alloc()
169 rs->md.raid_disks = raid_devs; in context_alloc()
170 rs->md.level = raid_type->level; in context_alloc()
171 rs->md.new_level = rs->md.level; in context_alloc()
172 rs->md.layout = raid_type->algorithm; in context_alloc()
173 rs->md.new_layout = rs->md.layout; in context_alloc()
174 rs->md.delta_disks = 0; in context_alloc()
175 rs->md.recovery_cp = 0; in context_alloc()
196 for (i = 0; i < rs->md.raid_disks; i++) { in context_free()
230 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { in dev_parms()
241 rs->dev[i].rdev.mddev = &rs->md; in dev_parms()
283 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); in dev_parms()
289 rs->md.external = 0; in dev_parms()
290 rs->md.persistent = 1; in dev_parms()
291 rs->md.major_version = 2; in dev_parms()
292 } else if (rebuild && !rs->md.recovery_cp) { in dev_parms()
360 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
369 rs->md.bitmap_info.chunksize = (region_size << 9); in validate_region_size()
389 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
396 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
406 copies = raid10_md_layout_to_copies(rs->md.layout); in validate_raid_redundancy()
424 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { in validate_raid_redundancy()
425 for (i = 0; i < rs->md.raid_disks * copies; i++) { in validate_raid_redundancy()
428 d = i % rs->md.raid_disks; in validate_raid_redundancy()
449 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
450 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
452 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
525 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
546 for (i = 0; i < rs->md.raid_disks; i++) { in parse_raid_params()
556 rs->md.recovery_cp = MaxSector; in parse_raid_params()
561 rs->md.recovery_cp = 0; in parse_raid_params()
598 if (value >= rs->md.raid_disks) { in parse_raid_params()
610 if (value >= rs->md.raid_disks) { in parse_raid_params()
631 rs->md.bitmap_info.max_write_behind = value; in parse_raid_params()
638 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
653 if (raid5_set_cache_size(&rs->md, (int)value)) { in parse_raid_params()
663 rs->md.sync_speed_min = (int)value; in parse_raid_params()
670 rs->md.sync_speed_max = (int)value; in parse_raid_params()
692 if (rs->md.chunk_sectors) in parse_raid_params()
693 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
701 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
717 sector_div(sectors_per_dev, rs->md.raid_disks); in parse_raid_params()
719 rs->md.layout = raid10_format_to_md_layout(raid10_format, in parse_raid_params()
721 rs->md.new_layout = rs->md.layout; in parse_raid_params()
724 (rs->md.raid_disks - rs->raid_type->parity_devs))) { in parse_raid_params()
728 rs->md.dev_sectors = sectors_per_dev; in parse_raid_params()
731 rs->md.persistent = 0; in parse_raid_params()
732 rs->md.external = 1; in parse_raid_params()
739 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event()
748 return mddev_congested(&rs->md, bits); in raid_is_congested()
812 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync()
901 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_init_validation()
1030 struct mddev *mddev = &rs->md; in super_validate()
1073 struct mddev *mddev = &rs->md; in analyse_superblocks()
1170 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); in configure_discard_support()
1172 for (i = 0; i < rs->md.raid_disks; i++) { in configure_discard_support()
1200 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); in configure_discard_support()
1275 rs->md.sync_super = super_sync; in raid_ctr()
1280 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
1290 mddev_lock_nointr(&rs->md); in raid_ctr()
1291 ret = md_run(&rs->md); in raid_ctr()
1292 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
1293 mddev_unlock(&rs->md); in raid_ctr()
1300 if (ti->len != rs->md.array_sectors) { in raid_ctr()
1308 mddev_suspend(&rs->md); in raid_ctr()
1312 md_stop(&rs->md); in raid_ctr()
1324 md_stop(&rs->md); in raid_dtr()
1331 struct mddev *mddev = &rs->md; in raid_map()
1374 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); in raid_status()
1377 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) in raid_status()
1378 sync = rs->md.curr_resync_completed; in raid_status()
1380 sync = rs->md.recovery_cp; in raid_status()
1382 if (sync >= rs->md.resync_max_sectors) { in raid_status()
1387 sync = rs->md.resync_max_sectors; in raid_status()
1388 } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { in raid_status()
1402 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1409 sync = rs->md.resync_max_sectors; in raid_status()
1418 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1438 (unsigned long long) rs->md.resync_max_sectors); in raid_status()
1445 DMEMIT(" %s", decipher_sync_action(&rs->md)); in raid_status()
1453 (strcmp(rs->md.last_sync_action, "check")) ? 0 : in raid_status()
1455 atomic64_read(&rs->md.resync_mismatches)); in raid_status()
1459 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1474 raid_param_cnt, rs->md.chunk_sectors); in raid_status()
1477 (rs->md.recovery_cp == MaxSector)) in raid_status()
1482 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1490 rs->md.bitmap_info.daemon_sleep); in raid_status()
1493 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); in raid_status()
1496 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); in raid_status()
1498 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1505 rs->md.bitmap_info.max_write_behind); in raid_status()
1508 struct r5conf *conf = rs->md.private; in raid_status()
1517 rs->md.bitmap_info.chunksize >> 9); in raid_status()
1521 raid10_md_layout_to_copies(rs->md.layout)); in raid_status()
1525 raid10_md_layout_to_format(rs->md.layout)); in raid_status()
1527 DMEMIT(" %d", rs->md.raid_disks); in raid_status()
1528 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1545 struct mddev *mddev = &rs->md; in raid_message()
1603 for (i = 0; !ret && i < rs->md.raid_disks; i++) in raid_iterate_devices()
1608 rs->md.dev_sectors, in raid_iterate_devices()
1617 unsigned chunk_size = rs->md.chunk_sectors << 9; in raid_io_hints()
1618 struct r5conf *conf = rs->md.private; in raid_io_hints()
1628 md_stop_writes(&rs->md); in raid_presuspend()
1635 mddev_suspend(&rs->md); in raid_postsuspend()
1646 for (i = 0; i < rs->md.raid_disks; i++) { in attempt_restore_of_faulty_devices()
1685 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
1699 set_bit(MD_CHANGE_DEVS, &rs->md.flags); in raid_resume()
1702 bitmap_load(&rs->md); in raid_resume()
1713 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_resume()
1716 mddev_resume(&rs->md); in raid_resume()
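
Several of the references above recover the enclosing raid_set from a bare pointer with container_of(): do_table_event() walks back from the embedded event_work, while super_sync() and super_init_validation() walk back from the embedded mddev. A hypothetical helper showing the latter pattern (the name mddev_to_rs is illustrative only; dm-raid.c open-codes the expression instead of defining a helper):

	#include <linux/kernel.h>	/* container_of() */

	/* Because struct raid_set embeds its mddev by value, subtracting the
	 * member offset recovers the containing raid_set, exactly as lines
	 * 812 and 901 above do inline. */
	static inline struct raid_set *mddev_to_rs(struct mddev *mddev)
	{
		return container_of(mddev, struct raid_set, md);
	}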