Lines Matching refs: era

656 	uint32_t era;  member
672 uint64_t key = d->era; in metadata_digest_remove_writeset()
733 d->era = key; in metadata_digest_lookup_writeset()
1108 uint32_t era; member
1132 s->era = md->current_era; in metadata_get_stats()
1139 struct era { struct
1178 static bool block_size_is_power_of_two(struct era *era) in block_size_is_power_of_two() argument
1180 return era->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
1183 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1187 if (!block_size_is_power_of_two(era)) in get_block()
1188 (void) sector_div(block_nr, era->sectors_per_block); in get_block()
1190 block_nr >>= era->sectors_per_block_shift; in get_block()
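
The two lines above are the whole sector-to-block mapping: a shift when the block size is a power of two, a full division otherwise. A minimal userspace sketch of that arithmetic, with illustrative function and parameter names (not from the driver):

    #include <stdint.h>

    /* Mirror of get_block(): a negative shift means the block size is
     * not a power of two, so fall back to a full division. */
    static uint64_t sector_to_block(uint64_t sector,
                                    uint32_t sectors_per_block,
                                    int sectors_per_block_shift)
    {
        if (sectors_per_block_shift < 0)
            return sector / sectors_per_block;        /* arbitrary size */
        return sector >> sectors_per_block_shift;     /* power-of-two fast path */
    }
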
1195 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1197 bio->bi_bdev = era->origin_dev->bdev; in remap_to_origin()
1203 static void wake_worker(struct era *era) in wake_worker() argument
1205 if (!atomic_read(&era->suspended)) in wake_worker()
1206 queue_work(era->wq, &era->worker); in wake_worker()
1209 static void process_old_eras(struct era *era) in process_old_eras() argument
1213 if (!era->digest.step) in process_old_eras()
1216 r = era->digest.step(era->md, &era->digest); in process_old_eras()
1219 era->digest.step = NULL; in process_old_eras()
1221 } else if (era->digest.step) in process_old_eras()
1222 wake_worker(era); in process_old_eras()
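
process_old_eras() runs one bounded chunk of the writeset digest per worker pass and re-queues the worker while work remains; an error clears digest.step so the digest simply stops. A hedged userspace sketch of that resumable-step pattern (types and names invented for illustration):

    #include <stddef.h>

    struct digest_like {
        /* step() does one bounded chunk and clears itself when finished. */
        int (*step)(struct digest_like *d);
    };

    /* Returns 1 if the worker should be re-queued to keep digesting. */
    static int process_old_eras_like(struct digest_like *d)
    {
        if (!d->step)
            return 0;               /* no digest in progress */
        if (d->step(d) < 0) {
            d->step = NULL;         /* error: abandon the digest */
            return 0;
        }
        return d->step != NULL;     /* more chunks left: wake again */
    }
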
1225 static void process_deferred_bios(struct era *era) in process_deferred_bios() argument
1236 spin_lock(&era->deferred_lock); in process_deferred_bios()
1237 bio_list_merge(&deferred_bios, &era->deferred_bios); in process_deferred_bios()
1238 bio_list_init(&era->deferred_bios); in process_deferred_bios()
1239 spin_unlock(&era->deferred_lock); in process_deferred_bios()
1242 r = writeset_test_and_set(&era->md->bitset_info, in process_deferred_bios()
1243 era->md->current_writeset, in process_deferred_bios()
1244 get_block(era, bio)); in process_deferred_bios()
1259 r = metadata_commit(era->md); in process_deferred_bios()
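
process_deferred_bios() drains the shared bio list into a local list under the spinlock, then does the slow work (writeset marking and a single metadata commit) without holding the lock. A minimal pthread sketch of that drain-then-process pattern; the list and request types here are userspace stand-ins, not the driver's bio_list:

    #include <pthread.h>
    #include <stddef.h>

    struct req { struct req *next; };

    static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct req *deferred_head;   /* shared, lock-protected */

    static void process_req(struct req *r) { (void)r; /* mark + submit */ }

    static void process_deferred(void)
    {
        /* Snapshot the list under the lock, leaving it empty... */
        pthread_mutex_lock(&deferred_lock);
        struct req *local = deferred_head;
        deferred_head = NULL;
        pthread_mutex_unlock(&deferred_lock);

        /* ...then do the expensive work (and one commit) unlocked. */
        while (local) {
            struct req *next = local->next;
            process_req(local);
            local = next;
        }
    }

Holding the lock only for the splice keeps the map path (defer_bio below) from contending with metadata I/O.
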
1272 static void process_rpc_calls(struct era *era) in process_rpc_calls() argument
1280 spin_lock(&era->rpc_lock); in process_rpc_calls()
1281 list_splice_init(&era->rpc_calls, &calls); in process_rpc_calls()
1282 spin_unlock(&era->rpc_lock); in process_rpc_calls()
1285 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); in process_rpc_calls()
1290 r = metadata_commit(era->md); in process_rpc_calls()
1300 static void kick_off_digest(struct era *era) in kick_off_digest() argument
1302 if (era->md->archived_writesets) { in kick_off_digest()
1303 era->md->archived_writesets = false; in kick_off_digest()
1304 metadata_digest_start(era->md, &era->digest); in kick_off_digest()
1310 struct era *era = container_of(ws, struct era, worker); in do_work() local
1312 kick_off_digest(era); in do_work()
1313 process_old_eras(era); in do_work()
1314 process_deferred_bios(era); in do_work()
1315 process_rpc_calls(era); in do_work()
1318 static void defer_bio(struct era *era, struct bio *bio) in defer_bio() argument
1320 spin_lock(&era->deferred_lock); in defer_bio()
1321 bio_list_add(&era->deferred_bios, bio); in defer_bio()
1322 spin_unlock(&era->deferred_lock); in defer_bio()
1324 wake_worker(era); in defer_bio()
1330 static int perform_rpc(struct era *era, struct rpc *rpc) in perform_rpc() argument
1335 spin_lock(&era->rpc_lock); in perform_rpc()
1336 list_add(&rpc->list, &era->rpc_calls); in perform_rpc()
1337 spin_unlock(&era->rpc_lock); in perform_rpc()
1339 wake_worker(era); in perform_rpc()
1345 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *)) in in_worker0() argument
1351 return perform_rpc(era, &rpc); in in_worker0()
1354 static int in_worker1(struct era *era, in in_worker1() argument
1362 return perform_rpc(era, &rpc); in in_worker1()
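
in_worker0()/in_worker1() wrap perform_rpc(): the caller queues an rpc struct, wakes the worker, and blocks until the worker fills in rpc->result. The driver waits on a kernel completion; the condition variable below is a userspace stand-in, and all names are illustrative:

    #include <pthread.h>

    struct rpc_like {
        int (*fn)(void *ctx);
        void *ctx;
        int result;
        int done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
    };

    /* Worker side: run the call, publish the result, signal the waiter. */
    static void rpc_execute(struct rpc_like *rpc)
    {
        int r = rpc->fn(rpc->ctx);
        pthread_mutex_lock(&rpc->lock);
        rpc->result = r;
        rpc->done = 1;
        pthread_cond_signal(&rpc->cond);
        pthread_mutex_unlock(&rpc->lock);
    }

    /* Caller side: block until the worker has processed the rpc. */
    static int rpc_wait(struct rpc_like *rpc)
    {
        pthread_mutex_lock(&rpc->lock);
        while (!rpc->done)
            pthread_cond_wait(&rpc->cond, &rpc->lock);
        pthread_mutex_unlock(&rpc->lock);
        return rpc->result;
    }

Funnelling every metadata operation through the single worker thread means the metadata code needs no locking of its own.
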
1365 static void start_worker(struct era *era) in start_worker() argument
1367 atomic_set(&era->suspended, 0); in start_worker()
1370 static void stop_worker(struct era *era) in stop_worker() argument
1372 atomic_set(&era->suspended, 1); in stop_worker()
1373 flush_workqueue(era->wq); in stop_worker()
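
stop_worker() pairs an atomic suspend flag with a workqueue flush: once the flag is set, wake_worker() refuses to queue new work, and flush_workqueue() drains anything already in flight, leaving the worker quiescent. A sketch of that shutdown handshake using C11 atomics as a stand-in for the kernel's atomic_t:

    #include <stdatomic.h>

    static atomic_int suspended;    /* 0 = running, 1 = suspended */

    /* wake_worker() equivalent: refuse to queue once suspended. */
    static void wake_worker_like(void (*queue_fn)(void))
    {
        if (!atomic_load(&suspended))
            queue_fn();
    }

    /* stop_worker() equivalent: set the flag, then drain in-flight
     * work (the driver drains with flush_workqueue()). */
    static void stop_worker_like(void (*flush_fn)(void))
    {
        atomic_store(&suspended, 1);
        flush_fn();
    }
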
1387 struct era *era = container_of(cb, struct era, callbacks); in era_is_congested() local
1388 return dev_is_congested(era->origin_dev, bdi_bits); in era_is_congested()
1391 static void era_destroy(struct era *era) in era_destroy() argument
1393 if (era->md) in era_destroy()
1394 metadata_close(era->md); in era_destroy()
1396 if (era->wq) in era_destroy()
1397 destroy_workqueue(era->wq); in era_destroy()
1399 if (era->origin_dev) in era_destroy()
1400 dm_put_device(era->ti, era->origin_dev); in era_destroy()
1402 if (era->metadata_dev) in era_destroy()
1403 dm_put_device(era->ti, era->metadata_dev); in era_destroy()
1405 kfree(era); in era_destroy()
1408 static dm_block_t calc_nr_blocks(struct era *era) in calc_nr_blocks() argument
1410 return dm_sector_div_up(era->ti->len, era->sectors_per_block); in calc_nr_blocks()
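
calc_nr_blocks() is a round-up division: the target length in sectors over the block size, rounded up so a partial trailing block still gets a bit in the writeset. As a plain-C sketch of what dm_sector_div_up() computes here:

    #include <stdint.h>

    /* ceil(len_sectors / sectors_per_block), assuming both are nonzero. */
    static uint64_t nr_blocks(uint64_t len_sectors, uint32_t sectors_per_block)
    {
        return (len_sectors + sectors_per_block - 1) / sectors_per_block;
    }
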
1428 struct era *era; in era_ctr() local
1436 era = kzalloc(sizeof(*era), GFP_KERNEL); in era_ctr()
1437 if (!era) { in era_ctr()
1442 era->ti = ti; in era_ctr()
1444 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); in era_ctr()
1447 era_destroy(era); in era_ctr()
1451 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); in era_ctr()
1454 era_destroy(era); in era_ctr()
1458 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); in era_ctr()
1461 era_destroy(era); in era_ctr()
1465 r = dm_set_target_max_io_len(ti, era->sectors_per_block); in era_ctr()
1468 era_destroy(era); in era_ctr()
1472 if (!valid_block_size(era->sectors_per_block)) { in era_ctr()
1474 era_destroy(era); in era_ctr()
1477 if (era->sectors_per_block & (era->sectors_per_block - 1)) in era_ctr()
1478 era->sectors_per_block_shift = -1; in era_ctr()
1480 era->sectors_per_block_shift = __ffs(era->sectors_per_block); in era_ctr()
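
Lines 1477-1480 cache the shift at construction time: `x & (x - 1)` is zero exactly when x is a power of two, and __ffs() of a power of two is its log2. A userspace equivalent using the GCC/Clang builtin (an assumption; the kernel uses __ffs):

    /* Precompute the shift once, as era_ctr() does. Assumes
     * sectors_per_block > 0, which valid_block_size() checked above. */
    static int block_shift(unsigned int sectors_per_block)
    {
        if (sectors_per_block & (sectors_per_block - 1))
            return -1;                            /* not a power of two */
        return __builtin_ctz(sectors_per_block);  /* log2 of a power of two */
    }

get_block() then tests only the sign of the cached shift on every bio, keeping the division off the fast path.
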
1482 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); in era_ctr()
1485 era_destroy(era); in era_ctr()
1488 era->md = md; in era_ctr()
1490 era->nr_blocks = calc_nr_blocks(era); in era_ctr()
1492 r = metadata_resize(era->md, &era->nr_blocks); in era_ctr()
1495 era_destroy(era); in era_ctr()
1499 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in era_ctr()
1500 if (!era->wq) { in era_ctr()
1502 era_destroy(era); in era_ctr()
1505 INIT_WORK(&era->worker, do_work); in era_ctr()
1507 spin_lock_init(&era->deferred_lock); in era_ctr()
1508 bio_list_init(&era->deferred_bios); in era_ctr()
1510 spin_lock_init(&era->rpc_lock); in era_ctr()
1511 INIT_LIST_HEAD(&era->rpc_calls); in era_ctr()
1513 ti->private = era; in era_ctr()
1519 era->callbacks.congested_fn = era_is_congested; in era_ctr()
1520 dm_table_add_target_callbacks(ti->table, &era->callbacks); in era_ctr()
1532 struct era *era = ti->private; in era_map() local
1533 dm_block_t block = get_block(era, bio); in era_map()
1540 remap_to_origin(era, bio); in era_map()
1547 !metadata_current_marked(era->md, block)) { in era_map()
1548 defer_bio(era, bio); in era_map()
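
era_map() always remaps the bio to the origin device; it only bounces through the worker when the bio would dirty a block not yet marked in the current era's writeset (the full condition also checks request flags not shown in this listing). A simplified decision sketch with invented names:

    /* Sketch of era_map()'s choice: reads and already-marked blocks go
     * straight to the origin; unmarked writes are deferred so the
     * worker can update the writeset before the I/O is submitted. */
    enum map_action { MAP_REMAPPED, MAP_DEFERRED };

    static enum map_action era_map_like(int is_write, int block_marked)
    {
        if (is_write && !block_marked)
            return MAP_DEFERRED;
        return MAP_REMAPPED;
    }
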
1558 struct era *era = ti->private; in era_postsuspend() local
1560 r = in_worker0(era, metadata_era_archive); in era_postsuspend()
1566 stop_worker(era); in era_postsuspend()
1572 struct era *era = ti->private; in era_preresume() local
1573 dm_block_t new_size = calc_nr_blocks(era); in era_preresume()
1575 if (era->nr_blocks != new_size) { in era_preresume()
1576 r = in_worker1(era, metadata_resize, &new_size); in era_preresume()
1580 era->nr_blocks = new_size; in era_preresume()
1583 start_worker(era); in era_preresume()
1585 r = in_worker0(era, metadata_new_era); in era_preresume()
1604 struct era *era = ti->private; in era_status() local
1611 r = in_worker1(era, metadata_get_stats, &stats); in era_status()
1619 (unsigned) stats.era); in era_status()
1628 format_dev_t(buf, era->metadata_dev->bdev->bd_dev); in era_status()
1630 format_dev_t(buf, era->origin_dev->bdev->bd_dev); in era_status()
1631 DMEMIT("%s %u", buf, era->sectors_per_block); in era_status()
1643 struct era *era = ti->private; in era_message() local
1651 return in_worker0(era, metadata_checkpoint); in era_message()
1654 return in_worker0(era, metadata_take_snap); in era_message()
1657 return in_worker0(era, metadata_drop_snap); in era_message()
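
era_message() is a plain string dispatch: each dmsetup message maps to one metadata operation executed inside the worker via in_worker0(). A sketch of that shape, using the message names from the dm-era documentation and a generic function-pointer stand-in for the metadata calls:

    #include <errno.h>
    #include <string.h>

    typedef int (*md_fn)(void *md);   /* stand-in for int (*)(struct era_metadata *) */

    static int dispatch_message(const char *msg, void *md, md_fn checkpoint,
                                md_fn take_snap, md_fn drop_snap)
    {
        if (!strcmp(msg, "checkpoint"))
            return checkpoint(md);
        if (!strcmp(msg, "take_metadata_snap"))
            return take_snap(md);
        if (!strcmp(msg, "drop_metadata_snap"))
            return drop_snap(md);
        return -EINVAL;               /* unrecognized message */
    }
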
1671 struct era *era = ti->private; in era_iterate_devices() local
1672 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); in era_iterate_devices()
1677 struct era *era = ti->private; in era_io_hints() local
1684 if (io_opt_sectors < era->sectors_per_block || in era_io_hints()
1685 do_div(io_opt_sectors, era->sectors_per_block)) { in era_io_hints()
1687 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); in era_io_hints()
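
era_io_hints() snaps the stacked optimal I/O size to the era block size: if io_opt is smaller than one block or not an even multiple of it, it is replaced with exactly one block (sectors_per_block << SECTOR_SHIFT bytes, SECTOR_SHIFT being 9 for 512-byte sectors). A sketch of that check, returning bytes as blk_limits_io_opt() expects:

    #include <stdint.h>

    #define SECTOR_SHIFT 9   /* 512-byte sectors */

    /* Snap the optimal I/O size to the block size when the stacked
     * value does not divide evenly into era blocks. */
    static uint64_t fix_io_opt(uint64_t io_opt_sectors,
                               uint32_t sectors_per_block)
    {
        if (io_opt_sectors < sectors_per_block ||
            io_opt_sectors % sectors_per_block)
            return (uint64_t)sectors_per_block << SECTOR_SHIFT;
        return io_opt_sectors << SECTOR_SHIFT;
    }
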