Lines matching refs: md
Each entry below gives the source line number, the matching statement, and the enclosing function; md here is the struct mddev embedded in struct raid_set (see the member declaration at line 68).
68 struct mddev md; member
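For orientation, here is a minimal sketch of the containing structure implied by these references. Only the md member itself (line 68) and the fields dereferenced in the hits below (raid_type, dev[i].rdev) are confirmed by this listing; the field order, the raid_dev definition, and the flexible array are assumptions.

struct raid_dev {                       /* sketch; real definition not shown here */
        struct md_rdev rdev;            /* rs->dev[i].rdev in the hits below */
        /* meta/data dm_dev pointers assumed */
};

struct raid_set {                       /* sketch; field order assumed */
        struct dm_target *ti;           /* assumed */
        struct raid_type *raid_type;    /* rs->raid_type in the hits below */
        struct mddev md;                /* line 68: the embedded md device */
        struct raid_dev dev[0];         /* rs->dev[i] below; flexible array assumed */
};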
163 mddev_init(&rs->md); in context_alloc()
167 rs->md.raid_disks = raid_devs; in context_alloc()
168 rs->md.level = raid_type->level; in context_alloc()
169 rs->md.new_level = rs->md.level; in context_alloc()
170 rs->md.layout = raid_type->algorithm; in context_alloc()
171 rs->md.new_layout = rs->md.layout; in context_alloc()
172 rs->md.delta_disks = 0; in context_alloc()
173 rs->md.recovery_cp = 0; in context_alloc()
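Read together, the context_alloc() hits (lines 163-173) amount to initializing the embedded mddev and seeding it from the requested raid type. A reconstruction of that sequence, with allocation and error handling omitted:

        mddev_init(&rs->md);                    /* initialize the embedded mddev */

        rs->md.raid_disks = raid_devs;
        rs->md.level      = raid_type->level;
        rs->md.new_level  = rs->md.level;       /* no reshape pending */
        rs->md.layout     = raid_type->algorithm;
        rs->md.new_layout = rs->md.layout;
        rs->md.delta_disks = 0;
        rs->md.recovery_cp = 0;                 /* treat the array as not yet in sync */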
194 for (i = 0; i < rs->md.raid_disks; i++) { in context_free()
228 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { in dev_parms()
239 rs->dev[i].rdev.mddev = &rs->md; in dev_parms()
281 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); in dev_parms()
287 rs->md.external = 0; in dev_parms()
288 rs->md.persistent = 1; in dev_parms()
289 rs->md.major_version = 2; in dev_parms()
290 } else if (rebuild && !rs->md.recovery_cp) { in dev_parms()
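The dev_parms() hits show each parsed member device being tied to the shared mddev and queued on its disks list, and the array being switched to persistent v2 superblocks once metadata devices are present. A sketch of that shape; the parsing inside the loop, the metadata_available flag, and the error handling are assumptions:

        for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
                /* ... parse the <meta_dev> <data_dev> pair (not in this listing) ... */

                rs->dev[i].rdev.mddev = &rs->md;                /* bind rdev to the array */
                list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
        }

        if (metadata_available) {               /* flag name assumed */
                rs->md.external = 0;
                rs->md.persistent = 1;
                rs->md.major_version = 2;
        } else if (rebuild && !rs->md.recovery_cp) {
                /* rebuild requested without metadata on a not-yet-synced
                 * array: constructor error (message not in this listing) */
        }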
358 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
367 rs->md.bitmap_info.chunksize = (region_size << 9); in validate_region_size()
387 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
394 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
404 copies = raid10_md_layout_to_copies(rs->md.layout); in validate_raid_redundancy()
422 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { in validate_raid_redundancy()
423 for (i = 0; i < rs->md.raid_disks * copies; i++) { in validate_raid_redundancy()
426 d = i % rs->md.raid_disks; in validate_raid_redundancy()
447 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
448 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
450 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
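The validate_raid_redundancy() hits sketch the raid10 rebuild check: copies comes from the md layout, and for the "near" format the loop walks raid_disks * copies slots, mapping each back to a device with i % raid_disks, so that no group of mirrored copies has every member marked for rebuild. A hedged reconstruction; device_needs_rebuild() is a hypothetical helper standing in for checks not shown here:

        if (rebuild_cnt >= rs->md.raid_disks)
                return -EINVAL;                         /* more rebuilds than members */

        copies = raid10_md_layout_to_copies(rs->md.layout);

        if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
                for (i = 0; i < rs->md.raid_disks * copies; i++) {
                        if (!(i % copies))
                                rebuilds_per_group = 0; /* next copy group starts */
                        d = i % rs->md.raid_disks;
                        if (device_needs_rebuild(rs, d) &&      /* hypothetical helper */
                            ++rebuilds_per_group >= copies)
                                return -EINVAL;         /* no intact copy would remain */
                }
        }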
525 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
546 for (i = 0; i < rs->md.raid_disks; i++) { in parse_raid_params()
556 rs->md.recovery_cp = MaxSector; in parse_raid_params()
561 rs->md.recovery_cp = 0; in parse_raid_params()
598 if (value >= rs->md.raid_disks) { in parse_raid_params()
610 if (value >= rs->md.raid_disks) { in parse_raid_params()
631 rs->md.bitmap_info.max_write_behind = value; in parse_raid_params()
638 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
653 if (raid5_set_cache_size(&rs->md, (int)value)) { in parse_raid_params()
663 rs->md.sync_speed_min = (int)value; in parse_raid_params()
670 rs->md.sync_speed_max = (int)value; in parse_raid_params()
692 if (rs->md.chunk_sectors) in parse_raid_params()
693 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
701 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
717 sector_div(sectors_per_dev, rs->md.raid_disks); in parse_raid_params()
719 rs->md.layout = raid10_format_to_md_layout(raid10_format, in parse_raid_params()
721 rs->md.new_layout = rs->md.layout; in parse_raid_params()
724 (rs->md.raid_disks - rs->raid_type->parity_devs))) { in parse_raid_params()
728 rs->md.dev_sectors = sectors_per_dev; in parse_raid_params()
731 rs->md.persistent = 0; in parse_raid_params()
732 rs->md.external = 1; in parse_raid_params()
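The tail of parse_raid_params() derives the per-device size from the target length and records that dm-raid, not md, owns the on-disk metadata. The individual assignments below match the hits above; the raid10 branch structure, the ti->len seed, and the error values are assumptions:

        if (rs->md.chunk_sectors)
                max_io_len = rs->md.chunk_sectors;
        else
                max_io_len = region_size;               /* fallback assumed */
        /* max_io_len is then handed to dm core (call not in this listing) */

        sectors_per_dev = ti->len;                      /* seed assumed */
        if (rs->md.level == 10) {                       /* branch condition assumed */
                if (raid10_copies > rs->md.raid_disks)
                        return -EINVAL;                 /* not enough members for the copies */

                sectors_per_dev = ti->len * raid10_copies;      /* assumed */
                sector_div(sectors_per_dev, rs->md.raid_disks);

                rs->md.layout = raid10_format_to_md_layout(raid10_format,
                                                           raid10_copies);
                rs->md.new_layout = rs->md.layout;
        } else if (sector_div(sectors_per_dev,
                              (rs->md.raid_disks - rs->raid_type->parity_devs))) {
                return -EINVAL;         /* length not divisible by the data devices */
        }
        rs->md.dev_sectors = sectors_per_dev;

        rs->md.persistent = 0;          /* superblocks are managed by dm-raid ... */
        rs->md.external = 1;            /* ... so the mddev is "external" to md */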
739 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event()
748 return mddev_congested(&rs->md, bits); in raid_is_congested()
812 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync()
901 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_init_validation()
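Both superblock callbacks (lines 812 and 901) recover the enclosing raid_set from the struct mddev pointer the md core hands back, via container_of(). A self-contained userspace illustration of that pattern, using stand-in structures rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* userspace re-creation of the container_of() idiom used at lines 812 and 901 */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mddev_demo { int level; };               /* stand-in for struct mddev */
struct raid_set_demo {                          /* stand-in for struct raid_set */
        int flags;
        struct mddev_demo md;
};

int main(void)
{
        struct raid_set_demo rs = { .flags = 42, .md = { .level = 6 } };
        struct mddev_demo *mddev = &rs.md;      /* what the md core passes back */

        /* recover the raid_set exactly as super_sync()/super_init_validation() do */
        struct raid_set_demo *back = container_of(mddev, struct raid_set_demo, md);
        printf("flags=%d level=%d\n", back->flags, back->md.level);
        return 0;
}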
1070 struct mddev *mddev = &rs->md; in analyse_superblocks()
1165 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); in configure_discard_support()
1167 for (i = 0; i < rs->md.raid_disks; i++) { in configure_discard_support()
1195 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); in configure_discard_support()
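configure_discard_support() (lines 1165-1195) walks the members to decide whether the target may advertise discards and whether discard bios must be split. A skeletal sketch; everything in the loop body and the reason raid1/raid10 need splitting are assumptions beyond what the hits show:

        raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

        for (i = 0; i < rs->md.raid_disks; i++) {
                /* give up unless every member's queue supports discards;
                 * the raid456 case needs an extra safety check
                 * (details not part of this listing) */
        }

        /* raid1/raid10 are assumed to need their discard bios split,
         * while the striped/parity levels handle large discards whole */
        ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);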
1270 rs->md.sync_super = super_sync; in raid_ctr()
1275 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
1284 mutex_lock(&rs->md.reconfig_mutex); in raid_ctr()
1285 ret = md_run(&rs->md); in raid_ctr()
1286 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
1287 mutex_unlock(&rs->md.reconfig_mutex); in raid_ctr()
1294 if (ti->len != rs->md.array_sectors) { in raid_ctr()
1302 mddev_suspend(&rs->md); in raid_ctr()
1306 md_stop(&rs->md); in raid_ctr()
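In order, the raid_ctr() hits trace the hand-off to the md core: install dm-raid's superblock writer, arm the table-event work item, run the array under reconfig_mutex while forcing it dirty, verify the dm table length against the array size md computed, and leave the array suspended until raid_resume(). A sketch with assumed labels and error values:

        rs->md.sync_super = super_sync;         /* dm-raid writes its own superblocks */
        /* ... analyse_superblocks(), discard setup (lines elsewhere) ... */
        INIT_WORK(&rs->md.event_work, do_table_event);

        mutex_lock(&rs->md.reconfig_mutex);
        ret = md_run(&rs->md);
        rs->md.in_sync = 0;                     /* assume already marked dirty */
        mutex_unlock(&rs->md.reconfig_mutex);
        if (ret)
                goto bad;                       /* label assumed */

        if (ti->len != rs->md.array_sectors) {
                ret = -EINVAL;                  /* table must match the array size */
                goto size_mismatch;             /* label assumed */
        }

        mddev_suspend(&rs->md);                 /* stays suspended until resume */
        return 0;

size_mismatch:
        md_stop(&rs->md);
bad:
        /* free the raid_set context (call not in this listing) */
        return ret;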
1318 md_stop(&rs->md); in raid_dtr()
1325 struct mddev *mddev = &rs->md; in raid_map()
1368 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); in raid_status()
1370 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) in raid_status()
1371 sync = rs->md.curr_resync_completed; in raid_status()
1373 sync = rs->md.recovery_cp; in raid_status()
1375 if (sync >= rs->md.resync_max_sectors) { in raid_status()
1380 sync = rs->md.resync_max_sectors; in raid_status()
1381 } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { in raid_status()
1395 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1406 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1426 (unsigned long long) rs->md.resync_max_sectors); in raid_status()
1433 DMEMIT(" %s", decipher_sync_action(&rs->md)); in raid_status()
1441 (strcmp(rs->md.last_sync_action, "check")) ? 0 : in raid_status()
1443 atomic64_read(&rs->md.resync_mismatches)); in raid_status()
1447 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
1462 raid_param_cnt, rs->md.chunk_sectors); in raid_status()
1465 (rs->md.recovery_cp == MaxSector)) in raid_status()
1470 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1478 rs->md.bitmap_info.daemon_sleep); in raid_status()
1481 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); in raid_status()
1484 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); in raid_status()
1486 for (i = 0; i < rs->md.raid_disks; i++) in raid_status()
1493 rs->md.bitmap_info.max_write_behind); in raid_status()
1496 struct r5conf *conf = rs->md.private; in raid_status()
1505 rs->md.bitmap_info.chunksize >> 9); in raid_status()
1509 raid10_md_layout_to_copies(rs->md.layout)); in raid_status()
1513 raid10_md_layout_to_format(rs->md.layout)); in raid_status()
1515 DMEMIT(" %d", rs->md.raid_disks); in raid_status()
1516 for (i = 0; i < rs->md.raid_disks; i++) { in raid_status()
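Within raid_status(), lines 1370-1381 pick the sync position reported to userspace: the live resync counter while recovery is running, otherwise the saved checkpoint, clamped at resync_max_sectors. A sketch; the MD_RECOVERY_REQUESTED branch body and the exact DMEMIT format are assumptions:

        if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
                sync = rs->md.curr_resync_completed;    /* resync/recovery in flight */
        else
                sync = rs->md.recovery_cp;              /* last recorded checkpoint */

        if (sync >= rs->md.resync_max_sectors) {
                sync = rs->md.resync_max_sectors;       /* report fully in sync */
        } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
                /* user-requested check/repair pass: handling assumed */
        }

        DMEMIT(" %llu/%llu", (unsigned long long) sync,
               (unsigned long long) rs->md.resync_max_sectors);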
1533 struct mddev *mddev = &rs->md; in raid_message()
1591 for (i = 0; !ret && i < rs->md.raid_disks; i++) in raid_iterate_devices()
1596 rs->md.dev_sectors, in raid_iterate_devices()
1605 unsigned chunk_size = rs->md.chunk_sectors << 9; in raid_io_hints()
1606 struct r5conf *conf = rs->md.private; in raid_io_hints()
1616 md_stop_writes(&rs->md); in raid_presuspend()
1623 mddev_suspend(&rs->md); in raid_postsuspend()
1634 for (i = 0; i < rs->md.raid_disks; i++) { in attempt_restore_of_faulty_devices()
1673 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
1686 set_bit(MD_CHANGE_DEVS, &rs->md.flags); in raid_resume()
1688 bitmap_load(&rs->md); in raid_resume()
1699 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_resume()
1700 mddev_resume(&rs->md); in raid_resume()
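Finally, the raid_resume() hits give the resume sequence: mark the device set changed so md refreshes superblock state, load the write-intent bitmap on the first resume, unfreeze recovery, and let the mddev run again. A sketch; the bitmap_loaded guard and the else branch are assumptions:

        set_bit(MD_CHANGE_DEVS, &rs->md.flags);

        if (!rs->bitmap_loaded) {               /* guard name assumed */
                bitmap_load(&rs->md);
                rs->bitmap_loaded = 1;
        } else {
                /* later resumes: see attempt_restore_of_faulty_devices()
                 * at lines 1634-1673 (call site assumed) */
        }

        clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
        mddev_resume(&rs->md);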