Lines Matching refs:tc (drivers/md/dm-thin.c)
209 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
210 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
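
The two typedefs above are the pool's dispatch hooks: dm-thin rebinds pool->process_bio, pool->process_cell and friends when the pool changes mode (normal, read-only, failed), which is why the listing further down shows process_bio_fail, process_cell_fail and the read-only variants, and why the worker simply calls pool->process_bio(tc, bio). Below is a minimal user-space sketch of that function-pointer dispatch; struct thin_c, struct bio, pool_sketch, set_mode and the two handlers are simplified, hypothetical stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Hypothetical, stripped-down stand-ins for the kernel structures; the real
 * definitions live in drivers/md/dm-thin.c and the block layer. */
struct thin_c { int id; };
struct bio    { long sector; };

typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);

struct pool_sketch {
	process_bio_fn process_bio;	/* rebound when the pool changes mode */
	process_bio_fn process_discard;
};

static void handle_bio_normal(struct thin_c *tc, struct bio *bio)
{
	printf("thin %d: map and issue bio at sector %ld\n", tc->id, bio->sector);
}

static void handle_bio_pool_failed(struct thin_c *tc, struct bio *bio)
{
	printf("thin %d: pool failed, erroring bio at sector %ld\n", tc->id, bio->sector);
}

/* Analogue of switching pool mode: rebind the pointers once instead of
 * testing the mode on every bio. */
static void set_mode(struct pool_sketch *pool, int failed)
{
	pool->process_bio     = failed ? handle_bio_pool_failed : handle_bio_normal;
	pool->process_discard = failed ? handle_bio_pool_failed : handle_bio_normal;
}

int main(void)
{
	struct pool_sketch pool;
	struct thin_c tc = { .id = 1 };
	struct bio bio = { .sector = 2048 };

	set_mode(&pool, 0);
	pool.process_bio(&tc, &bio);	/* normal path */

	set_mode(&pool, 1);
	pool.process_bio(&tc, &bio);	/* failure path */
	return 0;
}
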
460 struct thin_c *tc; member
481 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) in error_thin_bio_list() argument
488 spin_lock_irqsave(&tc->lock, flags); in error_thin_bio_list()
490 spin_unlock_irqrestore(&tc->lock, flags); in error_thin_bio_list()
495 static void requeue_deferred_cells(struct thin_c *tc) in requeue_deferred_cells() argument
497 struct pool *pool = tc->pool; in requeue_deferred_cells()
504 spin_lock_irqsave(&tc->lock, flags); in requeue_deferred_cells()
505 list_splice_init(&tc->deferred_cells, &cells); in requeue_deferred_cells()
506 spin_unlock_irqrestore(&tc->lock, flags); in requeue_deferred_cells()
512 static void requeue_io(struct thin_c *tc) in requeue_io() argument
519 spin_lock_irqsave(&tc->lock, flags); in requeue_io()
520 __merge_bio_list(&bios, &tc->deferred_bio_list); in requeue_io()
521 __merge_bio_list(&bios, &tc->retry_on_resume_list); in requeue_io()
522 spin_unlock_irqrestore(&tc->lock, flags); in requeue_io()
525 requeue_deferred_cells(tc); in requeue_io()
530 struct thin_c *tc; in error_retry_list() local
533 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list()
534 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); in error_retry_list()
550 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
552 struct pool *pool = tc->pool; in get_bio_block()
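
get_bio_block() converts a bio's starting sector into the virtual thin-block number it falls in. Here is a small sketch of that arithmetic, assuming the usual shortcut of shifting when the pool's block size is a power of two and dividing otherwise; sketch_get_bio_block and struct fake_pool are hypothetical names, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the pool/bio fields involved. */
struct fake_pool {
	uint64_t sectors_per_block;
	int      sectors_per_block_shift;	/* -1 if block size is not a power of two */
};

/* Map a bio's starting sector to a virtual thin block. */
static uint64_t sketch_get_bio_block(const struct fake_pool *pool, uint64_t bi_sector)
{
	if (pool->sectors_per_block_shift >= 0)
		return bi_sector >> pool->sectors_per_block_shift;
	return bi_sector / pool->sectors_per_block;
}

int main(void)
{
	/* 64KiB blocks -> 128 sectors per block, shift of 7 */
	struct fake_pool pool = { .sectors_per_block = 128, .sectors_per_block_shift = 7 };
	printf("sector 1000 -> virtual block %llu\n",
	       (unsigned long long)sketch_get_bio_block(&pool, 1000));
	return 0;
}
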
563 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
565 struct pool *pool = tc->pool; in remap()
568 bio->bi_bdev = tc->pool_dev->bdev; in remap()
578 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
580 bio->bi_bdev = tc->origin_dev->bdev; in remap_to_origin()
583 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
586 dm_thin_changed_this_transaction(tc->td); in bio_triggers_commit()
600 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
602 struct pool *pool = tc->pool; in issue()
605 if (!bio_triggers_commit(tc, bio)) { in issue()
615 if (dm_thin_aborted_changes(tc->td)) { in issue()
629 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
631 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
632 issue(tc, bio); in remap_to_origin_and_issue()
635 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
638 remap(tc, bio, block); in remap_and_issue()
639 issue(tc, bio); in remap_and_issue()
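
remap() is the other half of the translation: as the lines above show, it points the bio at the pool's data device (tc->pool_dev->bdev) and rewrites the sector so the bio's offset within the block is preserved inside the allocated data block; remap_and_issue() then hands the result to issue(), which only sends the bio straight down when bio_triggers_commit() says no metadata commit is needed first. Below is a hedged sketch of just the sector rewrite, with hypothetical names.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch of the sector rewrite done by remap(): keep the offset
 * within the block, but land on the data device at the allocated data block.
 * Power-of-two block sizes allow shift/mask instead of div/mod. */
static uint64_t sketch_remap_sector(uint64_t bi_sector, uint64_t data_block,
				    uint64_t sectors_per_block, int shift)
{
	if (shift >= 0)
		return (data_block << shift) | (bi_sector & (sectors_per_block - 1));
	return data_block * sectors_per_block + bi_sector % sectors_per_block;
}

int main(void)
{
	/* 64KiB blocks -> 128 sectors per block, shift of 7 */
	uint64_t s = sketch_remap_sector(1000, 5, 128, 7);
	printf("virtual sector 1000 -> data-device sector %llu\n",
	       (unsigned long long)s);	/* 5*128 + (1000 & 127) = 744 */
	return 0;
}
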
661 struct thin_c *tc; member
678 struct pool *pool = m->tc->pool; in __complete_mapping_preparation()
689 struct pool *pool = m->tc->pool; in complete_mapping_preparation()
727 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) in cell_defer_no_holder() argument
729 struct pool *pool = tc->pool; in cell_defer_no_holder()
732 spin_lock_irqsave(&tc->lock, flags); in cell_defer_no_holder()
733 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
734 spin_unlock_irqrestore(&tc->lock, flags); in cell_defer_no_holder()
739 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
742 struct thin_c *tc; member
757 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
769 static void inc_remap_and_issue_cell(struct thin_c *tc, in inc_remap_and_issue_cell() argument
776 info.tc = tc; in inc_remap_and_issue_cell()
785 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
789 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
792 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
801 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
803 mempool_free(m, m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
808 struct thin_c *tc = m->tc; in process_prepared_mapping() local
809 struct pool *pool = tc->pool; in process_prepared_mapping()
829 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); in process_prepared_mapping()
843 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
846 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
847 remap_and_issue(tc, m->cell->holder, m->data_block); in process_prepared_mapping()
848 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
858 struct thin_c *tc = m->tc; in process_prepared_discard_fail() local
861 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_fail()
862 cell_defer_no_holder(tc, m->cell2); in process_prepared_discard_fail()
863 mempool_free(m, tc->pool->mapping_pool); in process_prepared_discard_fail()
868 struct thin_c *tc = m->tc; in process_prepared_discard_passdown() local
870 inc_all_io_entry(tc->pool, m->bio); in process_prepared_discard_passdown()
871 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown()
872 cell_defer_no_holder(tc, m->cell2); in process_prepared_discard_passdown()
876 remap_and_issue(tc, m->bio, m->data_block); in process_prepared_discard_passdown()
879 if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used) in process_prepared_discard_passdown()
882 remap_and_issue(tc, m->bio, m->data_block); in process_prepared_discard_passdown()
887 mempool_free(m, tc->pool->mapping_pool); in process_prepared_discard_passdown()
893 struct thin_c *tc = m->tc; in process_prepared_discard() local
895 r = dm_thin_remove_block(tc->td, m->virt_block); in process_prepared_discard()
965 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, in ll_zero() argument
971 to.bdev = tc->pool_dev->bdev; in ll_zero()
975 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
982 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
986 struct pool *pool = tc->pool; in remap_and_issue_overwrite()
993 remap_and_issue(tc, bio, data_block); in remap_and_issue_overwrite()
999 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_copy() argument
1006 struct pool *pool = tc->pool; in schedule_copy()
1009 m->tc = tc; in schedule_copy()
1031 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1039 to.bdev = tc->pool_dev->bdev; in schedule_copy()
1062 ll_zero(tc, m, in schedule_copy()
1071 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_internal_copy() argument
1075 schedule_copy(tc, virt_block, tc->pool_dev, in schedule_internal_copy()
1077 tc->pool->sectors_per_block); in schedule_internal_copy()
1080 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, in schedule_zero() argument
1084 struct pool *pool = tc->pool; in schedule_zero()
1088 m->tc = tc; in schedule_zero()
1102 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1105 ll_zero(tc, m, in schedule_zero()
1110 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_external_copy() argument
1114 struct pool *pool = tc->pool; in schedule_external_copy()
1118 if (virt_block_end <= tc->origin_size) in schedule_external_copy()
1119 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1123 else if (virt_block_begin < tc->origin_size) in schedule_external_copy()
1124 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1126 tc->origin_size - virt_block_begin); in schedule_external_copy()
1129 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1184 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) in alloc_data_block() argument
1188 struct pool *pool = tc->pool; in alloc_data_block()
1238 struct thin_c *tc = h->tc; in retry_on_resume() local
1241 spin_lock_irqsave(&tc->lock, flags); in retry_on_resume()
1242 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1243 spin_unlock_irqrestore(&tc->lock, flags); in retry_on_resume()
1298 static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_discard_cell() argument
1302 struct pool *pool = tc->pool; in process_discard_cell()
1305 dm_block_t block = get_bio_block(tc, bio); in process_discard_cell()
1309 if (tc->requeue_mode) { in process_discard_cell()
1314 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in process_discard_cell()
1322 build_data_key(tc->td, lookup_result.block, &key2); in process_discard_cell()
1323 if (bio_detain(tc->pool, &key2, bio, &cell2)) { in process_discard_cell()
1324 cell_defer_no_holder(tc, cell); in process_discard_cell()
1334 m->tc = tc; in process_discard_cell()
1348 cell_defer_no_holder(tc, cell); in process_discard_cell()
1349 cell_defer_no_holder(tc, cell2); in process_discard_cell()
1357 remap_and_issue(tc, bio, lookup_result.block); in process_discard_cell()
1367 cell_defer_no_holder(tc, cell); in process_discard_cell()
1374 cell_defer_no_holder(tc, cell); in process_discard_cell()
1380 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1384 dm_block_t block = get_bio_block(tc, bio); in process_discard_bio()
1386 build_virtual_key(tc->td, block, &key); in process_discard_bio()
1387 if (bio_detain(tc->pool, &key, bio, &cell)) in process_discard_bio()
1390 process_discard_cell(tc, cell); in process_discard_bio()
1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1400 struct pool *pool = tc->pool; in break_sharing()
1402 r = alloc_data_block(tc, &data_block); in break_sharing()
1405 schedule_internal_copy(tc, block, lookup_result->block, in break_sharing()
1434 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1435 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1441 static void remap_and_issue_shared_cell(struct thin_c *tc, in remap_and_issue_shared_cell() argument
1448 info.tc = tc; in remap_and_issue_shared_cell()
1452 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1456 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1459 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1462 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1468 struct pool *pool = tc->pool; in process_shared_bio()
1475 build_data_key(tc->td, lookup_result->block, &key); in process_shared_bio()
1477 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1482 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1483 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1489 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1491 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block); in process_shared_bio()
1492 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block); in process_shared_bio()
1496 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1501 struct pool *pool = tc->pool; in provision_block()
1508 cell_defer_no_holder(tc, cell); in provision_block()
1510 remap_and_issue(tc, bio, 0); in provision_block()
1519 cell_defer_no_holder(tc, cell); in provision_block()
1524 r = alloc_data_block(tc, &data_block); in provision_block()
1527 if (tc->origin_dev) in provision_block()
1528 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1530 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1545 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell() argument
1548 struct pool *pool = tc->pool; in process_cell()
1550 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1553 if (tc->requeue_mode) { in process_cell()
1558 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in process_cell()
1562 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1565 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1566 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in process_cell()
1571 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1573 cell_defer_no_holder(tc, cell); in process_cell()
1575 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1576 remap_to_origin_and_issue(tc, bio); in process_cell()
1578 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1580 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1581 remap_to_origin_and_issue(tc, bio); in process_cell()
1588 provision_block(tc, bio, block, cell); in process_cell()
1594 cell_defer_no_holder(tc, cell); in process_cell()
1600 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
1602 struct pool *pool = tc->pool; in process_bio()
1603 dm_block_t block = get_bio_block(tc, bio); in process_bio()
1611 build_virtual_key(tc->td, block, &key); in process_bio()
1615 process_cell(tc, cell); in process_bio()
1618 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
1623 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
1626 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in __process_bio_read_only()
1630 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1632 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
1634 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1635 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
1637 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in __process_bio_read_only()
1643 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
1645 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1649 if (tc->origin_dev) { in __process_bio_read_only()
1650 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1651 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
1663 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
1669 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
1671 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
1674 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_read_only() argument
1676 __process_bio_read_only(tc, cell->holder, cell); in process_cell_read_only()
1679 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
1684 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
1689 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_success() argument
1691 cell_success(tc->pool, cell); in process_cell_success()
1694 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_fail() argument
1696 cell_error(tc->pool, cell); in process_cell_fail()
1712 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
1718 rbp = &tc->sort_bio_list.rb_node; in __thin_bio_rb_add()
1732 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); in __thin_bio_rb_add()
1735 static void __extract_sorted_bios(struct thin_c *tc) in __extract_sorted_bios() argument
1741 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { in __extract_sorted_bios()
1745 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
1746 rb_erase(&pbd->rb_node, &tc->sort_bio_list); in __extract_sorted_bios()
1749 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); in __extract_sorted_bios()
1752 static void __sort_thin_deferred_bios(struct thin_c *tc) in __sort_thin_deferred_bios() argument
1758 bio_list_merge(&bios, &tc->deferred_bio_list); in __sort_thin_deferred_bios()
1759 bio_list_init(&tc->deferred_bio_list); in __sort_thin_deferred_bios()
1763 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
1770 __extract_sorted_bios(tc); in __sort_thin_deferred_bios()
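
__sort_thin_deferred_bios() drains tc->deferred_bio_list into an rbtree keyed on each bio's starting sector (__thin_bio_rb_add) and then pulls the bios back out in order (__extract_sorted_bios), so the worker issues deferred I/O in roughly ascending disk order. Below is a simplified analogue that uses an array and qsort instead of an rbtree; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified analogue of the deferred-bio sort: order queued I/O by starting
 * sector before processing, to improve locality on the data device. */
static int cmp_sector(const void *a, const void *b)
{
	uint64_t sa = *(const uint64_t *)a, sb = *(const uint64_t *)b;
	return (sa > sb) - (sa < sb);
}

int main(void)
{
	uint64_t deferred[] = { 9000, 128, 4096, 256 };	/* pretend bi_sector values */
	size_t n = sizeof(deferred) / sizeof(deferred[0]);

	qsort(deferred, n, sizeof(deferred[0]), cmp_sector);

	for (size_t i = 0; i < n; i++)
		printf("issue bio at sector %llu\n", (unsigned long long)deferred[i]);
	return 0;
}
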
1773 static void process_thin_deferred_bios(struct thin_c *tc) in process_thin_deferred_bios() argument
1775 struct pool *pool = tc->pool; in process_thin_deferred_bios()
1782 if (tc->requeue_mode) { in process_thin_deferred_bios()
1783 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE); in process_thin_deferred_bios()
1789 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_bios()
1791 if (bio_list_empty(&tc->deferred_bio_list)) { in process_thin_deferred_bios()
1792 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
1796 __sort_thin_deferred_bios(tc); in process_thin_deferred_bios()
1798 bio_list_merge(&bios, &tc->deferred_bio_list); in process_thin_deferred_bios()
1799 bio_list_init(&tc->deferred_bio_list); in process_thin_deferred_bios()
1801 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
1811 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_bios()
1812 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
1813 bio_list_merge(&tc->deferred_bio_list, &bios); in process_thin_deferred_bios()
1814 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
1819 pool->process_discard(tc, bio); in process_thin_deferred_bios()
1821 pool->process_bio(tc, bio); in process_thin_deferred_bios()
1866 static void process_thin_deferred_cells(struct thin_c *tc) in process_thin_deferred_cells() argument
1868 struct pool *pool = tc->pool; in process_thin_deferred_cells()
1876 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_cells()
1877 list_splice_init(&tc->deferred_cells, &cells); in process_thin_deferred_cells()
1878 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_cells()
1884 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
1899 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_cells()
1900 list_splice(&cells, &tc->deferred_cells); in process_thin_deferred_cells()
1901 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_cells()
1906 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
1908 pool->process_cell(tc, cell); in process_thin_deferred_cells()
1913 static void thin_get(struct thin_c *tc);
1914 static void thin_put(struct thin_c *tc);
1923 struct thin_c *tc = NULL; in get_first_thin() local
1927 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
1928 thin_get(tc); in get_first_thin()
1932 return tc; in get_first_thin()
1935 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
1937 struct thin_c *old_tc = tc; in get_next_thin()
1940 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
1941 thin_get(tc); in get_next_thin()
1944 return tc; in get_next_thin()
1957 struct thin_c *tc; in process_deferred_bios() local
1959 tc = get_first_thin(pool); in process_deferred_bios()
1960 while (tc) { in process_deferred_bios()
1961 process_thin_deferred_cells(tc); in process_deferred_bios()
1962 process_thin_deferred_bios(tc); in process_deferred_bios()
1963 tc = get_next_thin(pool, tc); in process_deferred_bios()
2061 struct thin_c *tc; member
2072 w->tc->requeue_mode = true; in do_noflush_start()
2073 requeue_io(w->tc); in do_noflush_start()
2080 w->tc->requeue_mode = false; in do_noflush_stop()
2084 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) in noflush_work() argument
2088 w.tc = tc; in noflush_work()
2089 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2239 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2242 struct pool *pool = tc->pool; in thin_defer_bio()
2244 spin_lock_irqsave(&tc->lock, flags); in thin_defer_bio()
2245 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2246 spin_unlock_irqrestore(&tc->lock, flags); in thin_defer_bio()
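
thin_defer_bio() is the producer side of the deferred-bio machinery shown above: take tc->lock, append the bio to tc->deferred_bio_list, drop the lock, and wake the pool's worker to drain the list (process_thin_deferred_bios earlier in the listing). Here is a user-space analogue of that pattern with a pthread mutex and condition variable; everything below is a hypothetical stand-in for the kernel's spinlock and workqueue code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of thin_defer_bio(): queue work under a lock, then
 * wake whoever drains the queue. */
struct fake_bio { long sector; struct fake_bio *next; };

static struct fake_bio *deferred_head;
static pthread_mutex_t  lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   worker_wake = PTHREAD_COND_INITIALIZER;

static void sketch_thin_defer_bio(struct fake_bio *bio)
{
	pthread_mutex_lock(&lock);
	bio->next = deferred_head;		/* add to the deferred list */
	deferred_head = bio;
	pthread_mutex_unlock(&lock);

	pthread_cond_signal(&worker_wake);	/* wake the worker that drains the list */
}

int main(void)
{
	struct fake_bio *bio = malloc(sizeof(*bio));
	bio->sector = 512;
	sketch_thin_defer_bio(bio);

	pthread_mutex_lock(&lock);
	printf("deferred bio at sector %ld\n", deferred_head->sector);
	pthread_mutex_unlock(&lock);
	free(bio);
	return 0;
}
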
2251 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2253 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle()
2256 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2260 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in thin_defer_cell() argument
2263 struct pool *pool = tc->pool; in thin_defer_cell()
2266 spin_lock_irqsave(&tc->lock, flags); in thin_defer_cell()
2267 list_add_tail(&cell->user_list, &tc->deferred_cells); in thin_defer_cell()
2268 spin_unlock_irqrestore(&tc->lock, flags); in thin_defer_cell()
2274 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2278 h->tc = tc; in thin_hook_bio()
2290 struct thin_c *tc = ti->private; in thin_bio_map() local
2291 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2292 struct dm_thin_device *td = tc->td; in thin_bio_map()
2297 thin_hook_bio(tc, bio); in thin_bio_map()
2299 if (tc->requeue_mode) { in thin_bio_map()
2304 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2310 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2318 build_virtual_key(tc->td, block, &key); in thin_bio_map()
2319 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2344 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2348 build_data_key(tc->td, result.block, &key); in thin_bio_map()
2349 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2350 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2354 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2355 cell_defer_no_holder(tc, data_cell); in thin_bio_map()
2356 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2358 remap(tc, bio, result.block); in thin_bio_map()
2363 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2373 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2393 struct thin_c *tc; in requeue_bios() local
2396 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2397 spin_lock_irqsave(&tc->lock, flags); in requeue_bios()
2398 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); in requeue_bios()
2399 bio_list_init(&tc->retry_on_resume_list); in requeue_bios()
2400 spin_unlock_irqrestore(&tc->lock, flags); in requeue_bios()
3139 struct thin_c *tc; in pool_suspend_active_thins() local
3142 tc = get_first_thin(pool); in pool_suspend_active_thins()
3143 while (tc) { in pool_suspend_active_thins()
3144 dm_internal_suspend_noflush(tc->thin_md); in pool_suspend_active_thins()
3145 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3151 struct thin_c *tc; in pool_resume_active_thins() local
3154 tc = get_first_thin(pool); in pool_resume_active_thins()
3155 while (tc) { in pool_resume_active_thins()
3156 dm_internal_resume(tc->thin_md); in pool_resume_active_thins()
3157 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
3689 static void thin_get(struct thin_c *tc) in thin_get() argument
3691 atomic_inc(&tc->refcount); in thin_get()
3694 static void thin_put(struct thin_c *tc) in thin_put() argument
3696 if (atomic_dec_and_test(&tc->refcount)) in thin_put()
3697 complete(&tc->can_destroy); in thin_put()
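
thin_get()/thin_put() implement the teardown handshake visible in thin_dtr() just below: the destructor unlinks the device from pool->active_thins, drops the initial reference, and waits on tc->can_destroy until the last user (for example the worker walking active thins) has called thin_put(). Here is a rough user-space analogue using C11 atomics plus a mutex/condvar in place of the kernel's completion; the fake_thin names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical analogue of the thin_get()/thin_put() teardown handshake:
 * drop the initial reference, then block until the last holder releases it,
 * mirroring atomic_dec_and_test() + complete()/wait_for_completion(). */
struct fake_thin {
	atomic_int      refcount;
	pthread_mutex_t lock;
	pthread_cond_t  can_destroy;
	int             destroyed_ok;
};

static void sketch_thin_get(struct fake_thin *tc)
{
	atomic_fetch_add(&tc->refcount, 1);
}

static void sketch_thin_put(struct fake_thin *tc)
{
	if (atomic_fetch_sub(&tc->refcount, 1) == 1) {	/* last reference gone */
		pthread_mutex_lock(&tc->lock);
		tc->destroyed_ok = 1;
		pthread_cond_signal(&tc->can_destroy);
		pthread_mutex_unlock(&tc->lock);
	}
}

int main(void)
{
	struct fake_thin tc = {
		.refcount = 1,	/* the reference owned by the target itself */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.can_destroy = PTHREAD_COND_INITIALIZER,
	};

	sketch_thin_get(&tc);	/* e.g. a worker iterating active thins */
	sketch_thin_put(&tc);	/* worker done with this thin */

	/* destructor path: drop our own reference, then wait until it is safe */
	sketch_thin_put(&tc);
	pthread_mutex_lock(&tc.lock);
	while (!tc.destroyed_ok)
		pthread_cond_wait(&tc.can_destroy, &tc.lock);
	pthread_mutex_unlock(&tc.lock);

	printf("safe to free the thin device\n");
	return 0;
}
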
3702 struct thin_c *tc = ti->private; in thin_dtr() local
3705 spin_lock_irqsave(&tc->pool->lock, flags); in thin_dtr()
3706 list_del_rcu(&tc->list); in thin_dtr()
3707 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_dtr()
3710 thin_put(tc); in thin_dtr()
3711 wait_for_completion(&tc->can_destroy); in thin_dtr()
3715 __pool_dec(tc->pool); in thin_dtr()
3716 dm_pool_close_thin_device(tc->td); in thin_dtr()
3717 dm_put_device(ti, tc->pool_dev); in thin_dtr()
3718 if (tc->origin_dev) in thin_dtr()
3719 dm_put_device(ti, tc->origin_dev); in thin_dtr()
3720 kfree(tc); in thin_dtr()
3740 struct thin_c *tc; in thin_ctr() local
3753 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL); in thin_ctr()
3754 if (!tc) { in thin_ctr()
3759 tc->thin_md = dm_table_get_md(ti->table); in thin_ctr()
3760 spin_lock_init(&tc->lock); in thin_ctr()
3761 INIT_LIST_HEAD(&tc->deferred_cells); in thin_ctr()
3762 bio_list_init(&tc->deferred_bio_list); in thin_ctr()
3763 bio_list_init(&tc->retry_on_resume_list); in thin_ctr()
3764 tc->sort_bio_list = RB_ROOT; in thin_ctr()
3772 tc->origin_dev = origin_dev; in thin_ctr()
3780 tc->pool_dev = pool_dev; in thin_ctr()
3782 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) { in thin_ctr()
3788 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev); in thin_ctr()
3795 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
3796 if (!tc->pool) { in thin_ctr()
3801 __pool_inc(tc->pool); in thin_ctr()
3803 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
3809 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
3815 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
3825 if (tc->pool->pf.discard_enabled) { in thin_ctr()
3834 spin_lock_irqsave(&tc->pool->lock, flags); in thin_ctr()
3835 if (tc->pool->suspended) { in thin_ctr()
3836 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
3842 atomic_set(&tc->refcount, 1); in thin_ctr()
3843 init_completion(&tc->can_destroy); in thin_ctr()
3844 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
3845 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
3859 dm_pool_close_thin_device(tc->td); in thin_ctr()
3861 __pool_dec(tc->pool); in thin_ctr()
3865 dm_put_device(ti, tc->pool_dev); in thin_ctr()
3867 if (tc->origin_dev) in thin_ctr()
3868 dm_put_device(ti, tc->origin_dev); in thin_ctr()
3870 kfree(tc); in thin_ctr()
3890 struct pool *pool = h->tc->pool; in thin_endio()
3921 struct thin_c *tc = ti->private; in thin_presuspend() local
3924 noflush_work(tc, do_noflush_start); in thin_presuspend()
3929 struct thin_c *tc = ti->private; in thin_postsuspend() local
3935 noflush_work(tc, do_noflush_stop); in thin_postsuspend()
3940 struct thin_c *tc = ti->private; in thin_preresume() local
3942 if (tc->origin_dev) in thin_preresume()
3943 tc->origin_size = get_dev_size(tc->origin_dev->bdev); in thin_preresume()
3958 struct thin_c *tc = ti->private; in thin_status() local
3960 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
3965 if (!tc->td) in thin_status()
3970 r = dm_thin_get_mapped_count(tc->td, &mapped); in thin_status()
3976 r = dm_thin_get_highest_mapped_block(tc->td, &highest); in thin_status()
3982 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
3985 tc->pool->sectors_per_block) - 1); in thin_status()
3992 format_dev_t(buf, tc->pool_dev->bdev->bd_dev), in thin_status()
3993 (unsigned long) tc->dev_id); in thin_status()
3994 if (tc->origin_dev) in thin_status()
3995 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev)); in thin_status()
4009 struct thin_c *tc = ti->private; in thin_merge() local
4010 struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev); in thin_merge()
4015 bvm->bi_bdev = tc->pool_dev->bdev; in thin_merge()
4025 struct thin_c *tc = ti->private; in thin_iterate_devices() local
4026 struct pool *pool = tc->pool; in thin_iterate_devices()
4038 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()