Lines matching refs: era (identifier cross-reference, drivers/md/dm-era-target.c)

652 uint32_t era; member
668 uint64_t key = d->era; in metadata_digest_remove_writeset()
729 d->era = key; in metadata_digest_lookup_writeset()
1109 uint32_t era; member
1133 s->era = md->current_era; in metadata_get_stats()
1140 struct era { struct
1179 static bool block_size_is_power_of_two(struct era *era) in block_size_is_power_of_two() argument
1181 return era->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
1184 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1188 if (!block_size_is_power_of_two(era)) in get_block()
1189 (void) sector_div(block_nr, era->sectors_per_block); in get_block()
1191 block_nr >>= era->sectors_per_block_shift; in get_block()
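The three fragments above are the whole body of get_block(). A minimal sketch of the full helper, assuming the start sector is read from bio->bi_iter.bi_sector as in kernels of this vintage:

        /* Map a bio's start sector to an era block number: shift when the
         * block size is a power of two, otherwise divide with sector_div(). */
        static dm_block_t get_block(struct era *era, struct bio *bio)
        {
                sector_t block_nr = bio->bi_iter.bi_sector;

                if (!block_size_is_power_of_two(era))
                        (void) sector_div(block_nr, era->sectors_per_block);
                else
                        block_nr >>= era->sectors_per_block_shift;

                return block_nr;
        }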
1196 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1198 bio->bi_bdev = era->origin_dev->bdev; in remap_to_origin()
1204 static void wake_worker(struct era *era) in wake_worker() argument
1206 if (!atomic_read(&era->suspended)) in wake_worker()
1207 queue_work(era->wq, &era->worker); in wake_worker()
1210 static void process_old_eras(struct era *era) in process_old_eras() argument
1214 if (!era->digest.step) in process_old_eras()
1217 r = era->digest.step(era->md, &era->digest); in process_old_eras()
1220 era->digest.step = NULL; in process_old_eras()
1222 } else if (era->digest.step) in process_old_eras()
1223 wake_worker(era); in process_old_eras()
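process_old_eras() advances the writeset digest one step per worker pass and re-wakes itself while digest.step stays non-NULL. A sketch with the elided guard and error handling filled in (the DMERR wording is an assumption):

        static void process_old_eras(struct era *era)
        {
                int r;

                if (!era->digest.step)
                        return;         /* no digest in progress */

                r = era->digest.step(era->md, &era->digest);
                if (r < 0) {
                        DMERR("%s: digest step failed, stopping digestion", __func__);
                        era->digest.step = NULL;        /* abandon the digest */

                } else if (era->digest.step)
                        wake_worker(era);               /* more steps remain */
        }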
1226 static void process_deferred_bios(struct era *era) in process_deferred_bios() argument
1237 spin_lock(&era->deferred_lock); in process_deferred_bios()
1238 bio_list_merge(&deferred_bios, &era->deferred_bios); in process_deferred_bios()
1239 bio_list_init(&era->deferred_bios); in process_deferred_bios()
1240 spin_unlock(&era->deferred_lock); in process_deferred_bios()
1243 r = writeset_test_and_set(&era->md->bitset_info, in process_deferred_bios()
1244 era->md->current_writeset, in process_deferred_bios()
1245 get_block(era, bio)); in process_deferred_bios()
1260 r = metadata_commit(era->md); in process_deferred_bios()
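A sketch of the full process_deferred_bios() these fragments come from. The local list handling and the writeset_test_and_set() return convention (0 meaning the bit was newly set, so a commit is required before the bio may be issued) are assumptions from context:

        static void process_deferred_bios(struct era *era)
        {
                struct bio_list deferred_bios, marked_bios;
                struct bio *bio;
                bool commit_needed = false;
                bool failed = false;
                int r;

                bio_list_init(&deferred_bios);
                bio_list_init(&marked_bios);

                /* Grab the pending bios under the lock, then work unlocked. */
                spin_lock(&era->deferred_lock);
                bio_list_merge(&deferred_bios, &era->deferred_bios);
                bio_list_init(&era->deferred_bios);
                spin_unlock(&era->deferred_lock);

                while ((bio = bio_list_pop(&deferred_bios))) {
                        r = writeset_test_and_set(&era->md->bitset_info,
                                                  era->md->current_writeset,
                                                  get_block(era, bio));
                        if (r < 0)
                                failed = true;
                        else if (r == 0)
                                commit_needed = true;   /* bit newly set */

                        bio_list_add(&marked_bios, bio);
                }

                /* One commit covers every block marked in this batch. */
                if (commit_needed && metadata_commit(era->md))
                        failed = true;

                while ((bio = bio_list_pop(&marked_bios))) {
                        if (failed)
                                bio_io_error(bio);
                        else
                                generic_make_request(bio);
                }
        }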
1273 static void process_rpc_calls(struct era *era) in process_rpc_calls() argument
1281 spin_lock(&era->rpc_lock); in process_rpc_calls()
1282 list_splice_init(&era->rpc_calls, &calls); in process_rpc_calls()
1283 spin_unlock(&era->rpc_lock); in process_rpc_calls()
1286 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); in process_rpc_calls()
1291 r = metadata_commit(era->md); in process_rpc_calls()
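A sketch of process_rpc_calls() around these fragments; the splice-then-complete structure is an assumption consistent with perform_rpc() further down:

        static void process_rpc_calls(struct era *era)
        {
                int r;
                bool need_commit = false;
                struct list_head calls;
                struct rpc *rpc, *tmp;

                INIT_LIST_HEAD(&calls);
                spin_lock(&era->rpc_lock);
                list_splice_init(&era->rpc_calls, &calls);
                spin_unlock(&era->rpc_lock);

                /* Run each call against the metadata; fn0 takes no argument. */
                list_for_each_entry_safe(rpc, tmp, &calls, list) {
                        rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
                        need_commit = true;
                }

                if (need_commit) {
                        r = metadata_commit(era->md);
                        if (r)
                                /* a failed commit overrides individual results */
                                list_for_each_entry_safe(rpc, tmp, &calls, list)
                                        rpc->result = r;
                }

                /* Wake the callers blocked in perform_rpc(). */
                list_for_each_entry_safe(rpc, tmp, &calls, list)
                        complete(&rpc->complete);
        }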
1301 static void kick_off_digest(struct era *era) in kick_off_digest() argument
1303 if (era->md->archived_writesets) { in kick_off_digest()
1304 era->md->archived_writesets = false; in kick_off_digest()
1305 metadata_digest_start(era->md, &era->digest); in kick_off_digest()
1311 struct era *era = container_of(ws, struct era, worker); in do_work() local
1313 kick_off_digest(era); in do_work()
1314 process_old_eras(era); in do_work()
1315 process_deferred_bios(era); in do_work()
1316 process_rpc_calls(era); in do_work()
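do_work() is fully visible above; restated as one unit, it shows the fixed order of a worker pass:

        static void do_work(struct work_struct *ws)
        {
                struct era *era = container_of(ws, struct era, worker);

                kick_off_digest(era);           /* start digesting archived writesets */
                process_old_eras(era);          /* one incremental digest step */
                process_deferred_bios(era);     /* mark blocks, commit, issue bios */
                process_rpc_calls(era);         /* metadata ops on behalf of callers */
        }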
1319 static void defer_bio(struct era *era, struct bio *bio) in defer_bio() argument
1321 spin_lock(&era->deferred_lock); in defer_bio()
1322 bio_list_add(&era->deferred_bios, bio); in defer_bio()
1323 spin_unlock(&era->deferred_lock); in defer_bio()
1325 wake_worker(era); in defer_bio()
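defer_bio() is likewise fully visible; note the lock only guards the list, and wake_worker() (line 1204 above) is a no-op while era->suspended is set:

        static void defer_bio(struct era *era, struct bio *bio)
        {
                spin_lock(&era->deferred_lock);
                bio_list_add(&era->deferred_bios, bio);
                spin_unlock(&era->deferred_lock);

                wake_worker(era);       /* skipped while suspended */
        }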
1331 static int perform_rpc(struct era *era, struct rpc *rpc) in perform_rpc() argument
1336 spin_lock(&era->rpc_lock); in perform_rpc()
1337 list_add(&rpc->list, &era->rpc_calls); in perform_rpc()
1338 spin_unlock(&era->rpc_lock); in perform_rpc()
1340 wake_worker(era); in perform_rpc()
1346 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *)) in in_worker0() argument
1352 return perform_rpc(era, &rpc); in in_worker0()
1355 static int in_worker1(struct era *era, in in_worker1() argument
1363 return perform_rpc(era, &rpc); in in_worker1()
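perform_rpc() queues a call for the worker and blocks the caller until it completes. A sketch of the plumbing; the struct rpc layout is an assumption, since the listing only shows its use:

        struct rpc {
                struct list_head list;

                int (*fn0)(struct era_metadata *);
                int (*fn1)(struct era_metadata *, void *);
                void *arg;
                int result;

                struct completion complete;
        };

        static int perform_rpc(struct era *era, struct rpc *rpc)
        {
                rpc->result = 0;
                init_completion(&rpc->complete);

                spin_lock(&era->rpc_lock);
                list_add(&rpc->list, &era->rpc_calls);
                spin_unlock(&era->rpc_lock);

                wake_worker(era);
                wait_for_completion(&rpc->complete);    /* signalled by process_rpc_calls() */

                return rpc->result;
        }

        /* Convenience wrappers for zero- and one-argument metadata calls. */
        static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
        {
                struct rpc rpc;

                rpc.fn0 = fn;
                rpc.fn1 = NULL;

                return perform_rpc(era, &rpc);
        }

        static int in_worker1(struct era *era,
                              int (*fn)(struct era_metadata *, void *), void *arg)
        {
                struct rpc rpc;

                rpc.fn0 = NULL;
                rpc.fn1 = fn;
                rpc.arg = arg;

                return perform_rpc(era, &rpc);
        }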
1366 static void start_worker(struct era *era) in start_worker() argument
1368 atomic_set(&era->suspended, 0); in start_worker()
1371 static void stop_worker(struct era *era) in stop_worker() argument
1373 atomic_set(&era->suspended, 1); in stop_worker()
1374 flush_workqueue(era->wq); in stop_worker()
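The suspend protocol restated as a unit: setting the flag stops wake_worker() from queueing further passes, and flush_workqueue() waits out any pass already running:

        static void start_worker(struct era *era)
        {
                atomic_set(&era->suspended, 0);
        }

        static void stop_worker(struct era *era)
        {
                atomic_set(&era->suspended, 1);
                flush_workqueue(era->wq);
        }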
1388 struct era *era = container_of(cb, struct era, callbacks); in era_is_congested() local
1389 return dev_is_congested(era->origin_dev, bdi_bits); in era_is_congested()
1392 static void era_destroy(struct era *era) in era_destroy() argument
1394 if (era->md) in era_destroy()
1395 metadata_close(era->md); in era_destroy()
1397 if (era->wq) in era_destroy()
1398 destroy_workqueue(era->wq); in era_destroy()
1400 if (era->origin_dev) in era_destroy()
1401 dm_put_device(era->ti, era->origin_dev); in era_destroy()
1403 if (era->metadata_dev) in era_destroy()
1404 dm_put_device(era->ti, era->metadata_dev); in era_destroy()
1406 kfree(era); in era_destroy()
1409 static dm_block_t calc_nr_blocks(struct era *era) in calc_nr_blocks() argument
1411 return dm_sector_div_up(era->ti->len, era->sectors_per_block); in calc_nr_blocks()
1429 struct era *era; in era_ctr() local
1437 era = kzalloc(sizeof(*era), GFP_KERNEL); in era_ctr()
1438 if (!era) { in era_ctr()
1443 era->ti = ti; in era_ctr()
1445 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); in era_ctr()
1448 era_destroy(era); in era_ctr()
1452 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); in era_ctr()
1455 era_destroy(era); in era_ctr()
1459 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); in era_ctr()
1462 era_destroy(era); in era_ctr()
1466 r = dm_set_target_max_io_len(ti, era->sectors_per_block); in era_ctr()
1469 era_destroy(era); in era_ctr()
1473 if (!valid_block_size(era->sectors_per_block)) { in era_ctr()
1475 era_destroy(era); in era_ctr()
1478 if (era->sectors_per_block & (era->sectors_per_block - 1)) in era_ctr()
1479 era->sectors_per_block_shift = -1; in era_ctr()
1481 era->sectors_per_block_shift = __ffs(era->sectors_per_block); in era_ctr()
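The constructor precomputes the shift that lets get_block() avoid a division; a sketch of this step with the elided else filled in:

        /* -1 flags a non-power-of-two block size, forcing sector_div(). */
        if (era->sectors_per_block & (era->sectors_per_block - 1))
                era->sectors_per_block_shift = -1;
        else
                era->sectors_per_block_shift = __ffs(era->sectors_per_block);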
1483 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); in era_ctr()
1486 era_destroy(era); in era_ctr()
1489 era->md = md; in era_ctr()
1491 era->nr_blocks = calc_nr_blocks(era); in era_ctr()
1493 r = metadata_resize(era->md, &era->nr_blocks); in era_ctr()
1496 era_destroy(era); in era_ctr()
1500 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in era_ctr()
1501 if (!era->wq) { in era_ctr()
1503 era_destroy(era); in era_ctr()
1506 INIT_WORK(&era->worker, do_work); in era_ctr()
1508 spin_lock_init(&era->deferred_lock); in era_ctr()
1509 bio_list_init(&era->deferred_bios); in era_ctr()
1511 spin_lock_init(&era->rpc_lock); in era_ctr()
1512 INIT_LIST_HEAD(&era->rpc_calls); in era_ctr()
1514 ti->private = era; in era_ctr()
1520 era->callbacks.congested_fn = era_is_congested; in era_ctr()
1521 dm_table_add_target_callbacks(ti->table, &era->callbacks); in era_ctr()
1533 struct era *era = ti->private; in era_map() local
1534 dm_block_t block = get_block(era, bio); in era_map()
1541 remap_to_origin(era, bio); in era_map()
1548 !metadata_current_marked(era->md, block)) { in era_map()
1549 defer_bio(era, bio); in era_map()
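A sketch of era_map() around these fragments. Every bio is remapped to the origin up front; only data-carrying writes to blocks not yet marked in the current era go through the worker. The flush/write checks preceding the metadata_current_marked() test are elided above, so their exact form is an assumption matching kernels of this vintage:

        static int era_map(struct dm_target *ti, struct bio *bio)
        {
                struct era *era = ti->private;
                dm_block_t block = get_block(era, bio);

                /* Remap now; issue may be deferred until the block is marked. */
                remap_to_origin(era, bio);

                if (!(bio->bi_rw & REQ_FLUSH) &&
                    (bio_data_dir(bio) == WRITE) &&
                    !metadata_current_marked(era->md, block)) {
                        defer_bio(era, bio);
                        return DM_MAPIO_SUBMITTED;
                }

                return DM_MAPIO_REMAPPED;
        }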
1559 struct era *era = ti->private; in era_postsuspend() local
1561 r = in_worker0(era, metadata_era_archive); in era_postsuspend()
1567 stop_worker(era); in era_postsuspend()
1573 struct era *era = ti->private; in era_preresume() local
1574 dm_block_t new_size = calc_nr_blocks(era); in era_preresume()
1576 if (era->nr_blocks != new_size) { in era_preresume()
1577 r = in_worker1(era, metadata_resize, &new_size); in era_preresume()
1581 era->nr_blocks = new_size; in era_preresume()
1584 start_worker(era); in era_preresume()
1586 r = in_worker0(era, metadata_new_era); in era_preresume()
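A sketch of the suspend/resume pair: postsuspend archives the current writeset and parks the worker; preresume handles any origin resize, restarts the worker, then rolls over to a fresh era. Error strings are assumptions:

        static void era_postsuspend(struct dm_target *ti)
        {
                struct era *era = ti->private;

                if (in_worker0(era, metadata_era_archive))
                        DMERR("%s: couldn't archive current era", __func__);

                stop_worker(era);
        }

        static int era_preresume(struct dm_target *ti)
        {
                int r;
                struct era *era = ti->private;
                dm_block_t new_size = calc_nr_blocks(era);

                if (era->nr_blocks != new_size) {
                        r = in_worker1(era, metadata_resize, &new_size);
                        if (r)
                                return r;

                        era->nr_blocks = new_size;
                }

                start_worker(era);

                return in_worker0(era, metadata_new_era);       /* era rollover */
        }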
1605 struct era *era = ti->private; in era_status() local
1612 r = in_worker1(era, metadata_get_stats, &stats); in era_status()
1620 (unsigned) stats.era); in era_status()
1629 format_dev_t(buf, era->metadata_dev->bdev->bd_dev); in era_status()
1631 format_dev_t(buf, era->origin_dev->bdev->bd_dev); in era_status()
1632 DMEMIT("%s %u", buf, era->sectors_per_block); in era_status()
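A sketch of era_status(): STATUSTYPE_INFO reports metadata usage and the current era via the stats RPC, while STATUSTYPE_TABLE echoes the constructor arguments. The stats field names and the metadata block size macro are assumptions:

        static void era_status(struct dm_target *ti, status_type_t type,
                               unsigned status_flags, char *result, unsigned maxlen)
        {
                int r;
                struct era *era = ti->private;
                struct metadata_stats stats;
                char buf[BDEVNAME_SIZE];
                unsigned sz = 0;        /* used by DMEMIT */

                switch (type) {
                case STATUSTYPE_INFO:
                        r = in_worker1(era, metadata_get_stats, &stats);
                        if (r)
                                goto err;

                        DMEMIT("%u %llu/%llu %u",
                               (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                               (unsigned long long) stats.used,
                               (unsigned long long) stats.total,
                               (unsigned) stats.era);
                        break;

                case STATUSTYPE_TABLE:
                        format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
                        DMEMIT("%s ", buf);
                        format_dev_t(buf, era->origin_dev->bdev->bd_dev);
                        DMEMIT("%s %u", buf, era->sectors_per_block);
                        break;
                }

                return;

        err:
                DMEMIT("Error");
        }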
1644 struct era *era = ti->private; in era_message() local
1652 return in_worker0(era, metadata_checkpoint); in era_message()
1655 return in_worker0(era, metadata_take_snap); in era_message()
1658 return in_worker0(era, metadata_drop_snap); in era_message()
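A sketch of era_message(); the keyword strings match the dm-era documentation (checkpoint, take_metadata_snap, drop_metadata_snap):

        static int era_message(struct dm_target *ti, unsigned argc, char **argv)
        {
                struct era *era = ti->private;

                if (argc != 1) {
                        DMERR("incorrect number of message arguments");
                        return -EINVAL;
                }

                if (!strcasecmp(argv[0], "checkpoint"))
                        return in_worker0(era, metadata_checkpoint);

                if (!strcasecmp(argv[0], "take_metadata_snap"))
                        return in_worker0(era, metadata_take_snap);

                if (!strcasecmp(argv[0], "drop_metadata_snap"))
                        return in_worker0(era, metadata_drop_snap);

                DMERR("unsupported message '%s'", argv[0]);
                return -EINVAL;
        }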
1672 struct era *era = ti->private; in era_iterate_devices() local
1673 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); in era_iterate_devices()
1679 struct era *era = ti->private; in era_merge() local
1680 struct request_queue *q = bdev_get_queue(era->origin_dev->bdev); in era_merge()
1685 bvm->bi_bdev = era->origin_dev->bdev; in era_merge()
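Both passthrough hooks delegate to the origin device; a sketch, matching the old merge_bvec_fn interface visible above:

        static int era_iterate_devices(struct dm_target *ti,
                                       iterate_devices_callout_fn fn, void *data)
        {
                struct era *era = ti->private;

                return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
        }

        static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                             struct bio_vec *biovec, int max_size)
        {
                struct era *era = ti->private;
                struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);

                if (!q->merge_bvec_fn)
                        return max_size;

                /* era is a linear mapping, so only the device needs switching */
                bvm->bi_bdev = era->origin_dev->bdev;

                return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
        }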
1692 struct era *era = ti->private; in era_io_hints() local
1699 if (io_opt_sectors < era->sectors_per_block || in era_io_hints()
1700 do_div(io_opt_sectors, era->sectors_per_block)) { in era_io_hints()
1702 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); in era_io_hints()
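A sketch of era_io_hints(): the stacked io_opt is kept only when it is already a multiple of the era block size (do_div() returns the remainder):

        static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
        {
                struct era *era = ti->private;
                uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

                if (io_opt_sectors < era->sectors_per_block ||
                    do_div(io_opt_sectors, era->sectors_per_block))
                        blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
        }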