Lines matching refs: md
70 struct mapped_device *md; member
84 struct mapped_device *md; member
243 bool dm_use_blk_mq(struct mapped_device *md) in dm_use_blk_mq() argument
245 return md->use_blk_mq; in dm_use_blk_mq()
440 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
442 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
447 struct mapped_device *md; in dm_blk_open() local
451 md = bdev->bd_disk->private_data; in dm_blk_open()
452 if (!md) in dm_blk_open()
455 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
456 dm_deleting_md(md)) { in dm_blk_open()
457 md = NULL; in dm_blk_open()
461 dm_get(md); in dm_blk_open()
462 atomic_inc(&md->open_count); in dm_blk_open()
466 return md ? 0 : -ENXIO; in dm_blk_open()
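
The fragments above elide dm_blk_open()'s locking and exit path. A hedged reconstruction, assuming the usual _minor_lock spinlock guards the lookup (the out label and the lock/unlock calls are filled in, not shown in the listing):

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	/* refuse new opens while the device is being freed or deleted */
	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
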
471 struct mapped_device *md; in dm_blk_close() local
475 md = disk->private_data; in dm_blk_close()
476 if (WARN_ON(!md)) in dm_blk_close()
479 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
480 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
483 dm_put(md); in dm_blk_close()
488 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
490 return atomic_read(&md->open_count); in dm_open_count()
496 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
502 if (dm_open_count(md)) { in dm_lock_for_deletion()
505 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
506 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
509 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
516 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
522 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
525 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
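
A hedged reconstruction of the deletion-locking logic those fragments come from; the _minor_lock acquisition and the -EBUSY/-EEXIST error codes are assumptions filled in around the listed lines:

int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		/* still open: cannot delete now, optionally mark for deferred removal */
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
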
537 sector_t dm_get_size(struct mapped_device *md) in dm_get_size() argument
539 return get_capacity(md->disk); in dm_get_size()
542 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue() argument
544 return md->queue; in dm_get_md_queue()
547 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats() argument
549 return &md->stats; in dm_get_stats()
554 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
556 return dm_get_geometry(md, geo); in dm_blk_getgeo()
559 static int dm_get_live_table_for_ioctl(struct mapped_device *md, in dm_get_live_table_for_ioctl() argument
568 map = dm_get_live_table(md, srcu_idx); in dm_get_live_table_for_ioctl()
581 if (dm_suspended_md(md)) { in dm_get_live_table_for_ioctl()
593 dm_put_live_table(md, *srcu_idx); in dm_get_live_table_for_ioctl()
604 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
609 r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); in dm_blk_ioctl()
626 dm_put_live_table(md, srcu_idx); in dm_blk_ioctl()
630 static struct dm_io *alloc_io(struct mapped_device *md) in alloc_io() argument
632 return mempool_alloc(md->io_pool, GFP_NOIO); in alloc_io()
635 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io() argument
637 mempool_free(io, md->io_pool); in free_io()
640 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) in free_tio() argument
645 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, in alloc_rq_tio() argument
648 return mempool_alloc(md->io_pool, gfp_mask); in alloc_rq_tio()
653 mempool_free(tio, tio->md->io_pool); in free_rq_tio()
656 static struct request *alloc_clone_request(struct mapped_device *md, in alloc_clone_request() argument
659 return mempool_alloc(md->rq_pool, gfp_mask); in alloc_clone_request()
662 static void free_clone_request(struct mapped_device *md, struct request *rq) in free_clone_request() argument
664 mempool_free(rq, md->rq_pool); in free_clone_request()
667 static int md_in_flight(struct mapped_device *md) in md_in_flight() argument
669 return atomic_read(&md->pending[READ]) + in md_in_flight()
670 atomic_read(&md->pending[WRITE]); in md_in_flight()
675 struct mapped_device *md = io->md; in start_io_acct() local
683 part_round_stats(cpu, &dm_disk(md)->part0); in start_io_acct()
685 atomic_set(&dm_disk(md)->part0.in_flight[rw], in start_io_acct()
686 atomic_inc_return(&md->pending[rw])); in start_io_acct()
688 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
689 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
695 struct mapped_device *md = io->md; in end_io_acct() local
701 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); in end_io_acct()
703 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
704 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct()
711 pending = atomic_dec_return(&md->pending[rw]); in end_io_acct()
712 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); in end_io_acct()
713 pending += atomic_read(&md->pending[rw^0x1]); in end_io_acct()
717 wake_up(&md->wait); in end_io_acct()
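
The pending[READ]/pending[WRITE] counters maintained above are what completion waits poll: end_io_acct() wakes md->wait once both drop to zero. A minimal sketch of the matching drain wait (example_* name is hypothetical; the real dm_wait_for_completion() adds interruptible handling, see further down this listing):

static void example_wait_for_drain(struct mapped_device *md)
{
	wait_event(md->wait,
		   !(atomic_read(&md->pending[READ]) +
		     atomic_read(&md->pending[WRITE])));
}
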
723 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
727 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
728 bio_list_add(&md->deferred, bio); in queue_io()
729 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
730 queue_work(md->wq, &md->work); in queue_io()
738 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table() argument
740 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
742 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
745 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table() argument
747 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
750 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
752 synchronize_srcu(&md->io_barrier); in dm_sync_table()
760 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
763 return rcu_dereference(md->map); in dm_get_live_table_fast()
766 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
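
Reader-side usage implied by the two pairs above: the SRCU variant may sleep between get and put, while the rcu_read_lock()-based fast variant may not. A minimal sketch of the sleepable path (example_* name is hypothetical), matching how dm_blk_ioctl() and dm_make_request() use it:

static void example_inspect_live_table(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);	/* srcu_read_lock */
	if (map) {
		/* 'map' stays valid until the matching put */
		unsigned num = dm_table_get_num_targets(map);
		(void)num;
	}
	dm_put_live_table(md, srcu_idx);	/* srcu_read_unlock */
}
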
775 struct mapped_device *md) in open_table_device() argument
788 r = bd_link_disk_holder(bdev, dm_disk(md)); in open_table_device()
801 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
806 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
822 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device() argument
827 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
828 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
832 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
839 if ((r = open_table_device(td, dev, md))) { in dm_get_table_device()
840 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
848 list_add(&td->list, &md->table_devices); in dm_get_table_device()
851 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
858 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
862 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
864 close_table_device(td, md); in dm_put_table_device()
868 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
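
Usage sketch for the refcounted table-device list managed above; a caller pairs the get with a put for the table's lifetime (example_* name and the mode are placeholders):

static int example_use_table_device(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r;

	r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d);
	if (r)
		return r;

	/* ... use d->bdev while the reference is held ... */

	dm_put_table_device(md, d);
	return 0;
}
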
888 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
890 *geo = md->geometry; in dm_get_geometry()
898 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
907 md->geometry = *geo; in dm_set_geometry()
921 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
923 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
935 struct mapped_device *md = io->md; in dec_pending() local
940 if (!(io->error > 0 && __noflush_suspending(md))) in dec_pending()
950 spin_lock_irqsave(&md->deferred_lock, flags); in dec_pending()
951 if (__noflush_suspending(md)) in dec_pending()
952 bio_list_add_head(&md->deferred, io->bio); in dec_pending()
956 spin_unlock_irqrestore(&md->deferred_lock, flags); in dec_pending()
962 free_io(md, io); in dec_pending()
973 queue_io(md, bio); in dec_pending()
976 trace_block_bio_complete(md->queue, bio, io_error); in dec_pending()
983 static void disable_write_same(struct mapped_device *md) in disable_write_same() argument
985 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_same()
997 struct mapped_device *md = tio->io->md; in clone_endio() local
1019 disable_write_same(md); in clone_endio()
1021 free_tio(md, tio); in clone_endio()
1082 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
1084 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
1087 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), in rq_end_stats()
1098 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) in rq_completed() argument
1100 atomic_dec(&md->pending[rw]); in rq_completed()
1103 if (!md_in_flight(md)) in rq_completed()
1104 wake_up(&md->wait); in rq_completed()
1112 if (!md->queue->mq_ops && run_queue) in rq_completed()
1113 blk_run_queue_async(md->queue); in rq_completed()
1118 dm_put(md); in rq_completed()
1124 struct mapped_device *md = tio->md; in free_rq_clone() local
1128 if (md->type == DM_TYPE_MQ_REQUEST_BASED) in free_rq_clone()
1131 else if (!md->queue->mq_ops) in free_rq_clone()
1133 free_clone_request(md, clone); in free_rq_clone()
1140 if (!md->queue->mq_ops) in free_rq_clone()
1153 struct mapped_device *md = tio->md; in dm_end_request() local
1170 rq_end_stats(md, rq); in dm_end_request()
1175 rq_completed(md, rw, true); in dm_end_request()
1190 else if (!tio->md->queue->mq_ops) in dm_unprep_request()
1208 static void dm_requeue_original_request(struct mapped_device *md, in dm_requeue_original_request() argument
1213 rq_end_stats(md, rq); in dm_requeue_original_request()
1223 rq_completed(md, rw, false); in dm_requeue_original_request()
1279 disable_write_same(tio->md); in dm_done()
1289 dm_requeue_original_request(tio->md, tio->orig); in dm_done()
1307 rq_end_stats(tio->md, rq); in dm_softirq_done()
1311 rq_completed(tio->md, rw, false); in dm_softirq_done()
1315 rq_completed(tio->md, rw, false); in dm_softirq_done()
1474 struct mapped_device *md; in __map_bio() local
1497 md = tio->io->md; in __map_bio()
1499 free_tio(md, tio); in __map_bio()
1507 struct mapped_device *md; member
1548 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); in alloc_tio()
1714 static void __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio() argument
1726 ci.md = md; in __split_and_process_bio()
1727 ci.io = alloc_io(md); in __split_and_process_bio()
1731 ci.io->md = md; in __split_and_process_bio()
1738 ci.bio = &ci.md->flush_bio; in __split_and_process_bio()
1763 struct mapped_device *md = q->queuedata; in dm_make_request() local
1767 map = dm_get_live_table(md, &srcu_idx); in dm_make_request()
1769 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); in dm_make_request()
1772 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_make_request()
1773 dm_put_live_table(md, srcu_idx); in dm_make_request()
1776 queue_io(md, bio); in dm_make_request()
1782 __split_and_process_bio(md, map, bio); in dm_make_request()
1783 dm_put_live_table(md, srcu_idx); in dm_make_request()
1787 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
1789 return blk_queue_stackable(md->queue); in dm_request_based()
1825 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, in setup_clone()
1841 static struct request *clone_rq(struct request *rq, struct mapped_device *md, in clone_rq() argument
1852 clone = alloc_clone_request(md, gfp_mask); in clone_rq()
1862 free_clone_request(md, clone); in clone_rq()
1872 struct mapped_device *md) in init_tio() argument
1874 tio->md = md; in init_tio()
1880 if (md->kworker_task) in init_tio()
1885 struct mapped_device *md, gfp_t gfp_mask) in prep_tio() argument
1891 tio = alloc_rq_tio(md, gfp_mask); in prep_tio()
1895 init_tio(tio, rq, md); in prep_tio()
1897 table = dm_get_live_table(md, &srcu_idx); in prep_tio()
1899 if (!clone_rq(rq, md, tio, gfp_mask)) { in prep_tio()
1900 dm_put_live_table(md, srcu_idx); in prep_tio()
1905 dm_put_live_table(md, srcu_idx); in prep_tio()
1915 struct mapped_device *md = q->queuedata; in dm_prep_fn() local
1923 tio = prep_tio(rq, md, GFP_ATOMIC); in dm_prep_fn()
1940 struct mapped_device *md) in map_request() argument
1971 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), in map_request()
1977 dm_requeue_original_request(md, tio->orig); in map_request()
1997 struct mapped_device *md = tio->md; in map_tio_request() local
1999 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) in map_tio_request()
2000 dm_requeue_original_request(md, rq); in map_tio_request()
2003 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request() argument
2009 atomic_inc(&md->pending[rq_data_dir(orig)]); in dm_start_request()
2011 if (md->seq_rq_merge_deadline_usecs) { in dm_start_request()
2012 md->last_rq_pos = rq_end_sector(orig); in dm_start_request()
2013 md->last_rq_rw = rq_data_dir(orig); in dm_start_request()
2014 md->last_rq_start_time = ktime_get(); in dm_start_request()
2017 if (unlikely(dm_stats_used(&md->stats))) { in dm_start_request()
2021 dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), in dm_start_request()
2032 dm_get(md); in dm_start_request()
2037 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show() argument
2039 return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); in dm_attr_rq_based_seq_io_merge_deadline_show()
2042 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store() argument
2047 if (!dm_request_based(md) || md->use_blk_mq) in dm_attr_rq_based_seq_io_merge_deadline_store()
2056 md->seq_rq_merge_deadline_usecs = deadline; in dm_attr_rq_based_seq_io_merge_deadline_store()
2061 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) in dm_request_peeked_before_merge_deadline() argument
2065 if (!md->seq_rq_merge_deadline_usecs) in dm_request_peeked_before_merge_deadline()
2068 kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); in dm_request_peeked_before_merge_deadline()
2069 kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); in dm_request_peeked_before_merge_deadline()
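
A hedged reconstruction of the deadline check those lines belong to; the final comparison is filled in from the surrounding logic (a request peeked before the per-device deadline is held back so sequential I/O gets a chance to merge):

static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	/* still before the deadline: hold back to allow merging */
	return !ktime_after(ktime_get(), kt_deadline);
}
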
2080 struct mapped_device *md = q->queuedata; in dm_request_fn() local
2082 struct dm_table *map = dm_get_live_table(md, &srcu_idx); in dm_request_fn()
2111 dm_start_request(md, rq); in dm_request_fn()
2116 if (dm_request_peeked_before_merge_deadline(md) && in dm_request_fn()
2117 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && in dm_request_fn()
2118 md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) in dm_request_fn()
2124 dm_start_request(md, rq); in dm_request_fn()
2129 queue_kthread_work(&md->kworker, &tio->work); in dm_request_fn()
2138 dm_put_live_table(md, srcu_idx); in dm_request_fn()
2144 struct mapped_device *md = congested_data; in dm_any_congested() local
2147 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_any_congested()
2148 map = dm_get_live_table_fast(md); in dm_any_congested()
2154 if (dm_request_based(md)) in dm_any_congested()
2155 r = md->queue->backing_dev_info.wb.state & in dm_any_congested()
2160 dm_put_live_table_fast(md); in dm_any_congested()
2219 static void dm_init_md_queue(struct mapped_device *md) in dm_init_md_queue() argument
2230 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); in dm_init_md_queue()
2236 md->queue->queuedata = md; in dm_init_md_queue()
2237 md->queue->backing_dev_info.congested_data = md; in dm_init_md_queue()
2240 static void dm_init_old_md_queue(struct mapped_device *md) in dm_init_old_md_queue() argument
2242 md->use_blk_mq = false; in dm_init_old_md_queue()
2243 dm_init_md_queue(md); in dm_init_old_md_queue()
2248 md->queue->backing_dev_info.congested_fn = dm_any_congested; in dm_init_old_md_queue()
2249 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); in dm_init_old_md_queue()
2252 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
2254 if (md->wq) in cleanup_mapped_device()
2255 destroy_workqueue(md->wq); in cleanup_mapped_device()
2256 if (md->kworker_task) in cleanup_mapped_device()
2257 kthread_stop(md->kworker_task); in cleanup_mapped_device()
2258 mempool_destroy(md->io_pool); in cleanup_mapped_device()
2259 mempool_destroy(md->rq_pool); in cleanup_mapped_device()
2260 if (md->bs) in cleanup_mapped_device()
2261 bioset_free(md->bs); in cleanup_mapped_device()
2263 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
2265 if (md->disk) { in cleanup_mapped_device()
2267 md->disk->private_data = NULL; in cleanup_mapped_device()
2269 del_gendisk(md->disk); in cleanup_mapped_device()
2270 put_disk(md->disk); in cleanup_mapped_device()
2273 if (md->queue) in cleanup_mapped_device()
2274 blk_cleanup_queue(md->queue); in cleanup_mapped_device()
2276 if (md->bdev) { in cleanup_mapped_device()
2277 bdput(md->bdev); in cleanup_mapped_device()
2278 md->bdev = NULL; in cleanup_mapped_device()
2288 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); in alloc_dev() local
2291 if (!md) { in alloc_dev()
2307 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
2311 md->use_blk_mq = use_blk_mq; in alloc_dev()
2312 md->type = DM_TYPE_NONE; in alloc_dev()
2313 mutex_init(&md->suspend_lock); in alloc_dev()
2314 mutex_init(&md->type_lock); in alloc_dev()
2315 mutex_init(&md->table_devices_lock); in alloc_dev()
2316 spin_lock_init(&md->deferred_lock); in alloc_dev()
2317 atomic_set(&md->holders, 1); in alloc_dev()
2318 atomic_set(&md->open_count, 0); in alloc_dev()
2319 atomic_set(&md->event_nr, 0); in alloc_dev()
2320 atomic_set(&md->uevent_seq, 0); in alloc_dev()
2321 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
2322 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
2323 spin_lock_init(&md->uevent_lock); in alloc_dev()
2325 md->queue = blk_alloc_queue(GFP_KERNEL); in alloc_dev()
2326 if (!md->queue) in alloc_dev()
2329 dm_init_md_queue(md); in alloc_dev()
2331 md->disk = alloc_disk(1); in alloc_dev()
2332 if (!md->disk) in alloc_dev()
2335 atomic_set(&md->pending[0], 0); in alloc_dev()
2336 atomic_set(&md->pending[1], 0); in alloc_dev()
2337 init_waitqueue_head(&md->wait); in alloc_dev()
2338 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
2339 init_waitqueue_head(&md->eventq); in alloc_dev()
2340 init_completion(&md->kobj_holder.completion); in alloc_dev()
2341 md->kworker_task = NULL; in alloc_dev()
2343 md->disk->major = _major; in alloc_dev()
2344 md->disk->first_minor = minor; in alloc_dev()
2345 md->disk->fops = &dm_blk_dops; in alloc_dev()
2346 md->disk->queue = md->queue; in alloc_dev()
2347 md->disk->private_data = md; in alloc_dev()
2348 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
2349 add_disk(md->disk); in alloc_dev()
2350 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
2352 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
2353 if (!md->wq) in alloc_dev()
2356 md->bdev = bdget_disk(md->disk, 0); in alloc_dev()
2357 if (!md->bdev) in alloc_dev()
2360 bio_init(&md->flush_bio); in alloc_dev()
2361 md->flush_bio.bi_bdev = md->bdev; in alloc_dev()
2362 md->flush_bio.bi_rw = WRITE_FLUSH; in alloc_dev()
2364 dm_stats_init(&md->stats); in alloc_dev()
2368 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
2373 return md; in alloc_dev()
2376 cleanup_mapped_device(md); in alloc_dev()
2382 kfree(md); in alloc_dev()
2386 static void unlock_fs(struct mapped_device *md);
2388 static void free_dev(struct mapped_device *md) in free_dev() argument
2390 int minor = MINOR(disk_devt(md->disk)); in free_dev()
2392 unlock_fs(md); in free_dev()
2394 cleanup_mapped_device(md); in free_dev()
2395 if (md->use_blk_mq) in free_dev()
2396 blk_mq_free_tag_set(&md->tag_set); in free_dev()
2398 free_table_devices(&md->table_devices); in free_dev()
2399 dm_stats_cleanup(&md->stats); in free_dev()
2403 kfree(md); in free_dev()
2406 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools() argument
2410 if (md->bs) { in __bind_mempools()
2417 bioset_free(md->bs); in __bind_mempools()
2418 md->bs = p->bs; in __bind_mempools()
2432 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); in __bind_mempools()
2434 md->io_pool = p->io_pool; in __bind_mempools()
2436 md->rq_pool = p->rq_pool; in __bind_mempools()
2438 md->bs = p->bs; in __bind_mempools()
2453 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
2455 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2456 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2457 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2459 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2461 atomic_inc(&md->event_nr); in event_callback()
2462 wake_up(&md->eventq); in event_callback()
2468 static void __set_size(struct mapped_device *md, sector_t size) in __set_size() argument
2470 set_capacity(md->disk, size); in __set_size()
2472 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); in __set_size()
2478 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
2482 struct request_queue *q = md->queue; in __bind()
2490 if (size != dm_get_size(md)) in __bind()
2491 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2493 __set_size(md, size); in __bind()
2495 dm_table_event_callback(t, event_callback, md); in __bind()
2507 __bind_mempools(md, t); in __bind()
2509 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2510 rcu_assign_pointer(md->map, t); in __bind()
2511 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2515 dm_sync_table(md); in __bind()
2523 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
2525 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2531 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2532 dm_sync_table(md); in __unbind()
2542 struct mapped_device *md; in dm_create() local
2544 md = alloc_dev(minor); in dm_create()
2545 if (!md) in dm_create()
2548 dm_sysfs_init(md); in dm_create()
2550 *result = md; in dm_create()
2558 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2560 mutex_lock(&md->type_lock); in dm_lock_md_type()
2563 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2565 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2568 void dm_set_md_type(struct mapped_device *md, unsigned type) in dm_set_md_type() argument
2570 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2571 md->type = type; in dm_set_md_type()
2574 unsigned dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2576 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_get_md_type()
2577 return md->type; in dm_get_md_type()
2580 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2582 return md->immutable_target_type; in dm_get_immutable_target_type()
2589 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
2591 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2592 return &md->queue->limits; in dm_get_queue_limits()
2596 static void init_rq_based_worker_thread(struct mapped_device *md) in init_rq_based_worker_thread() argument
2599 init_kthread_worker(&md->kworker); in init_rq_based_worker_thread()
2600 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, in init_rq_based_worker_thread()
2601 "kdmwork-%s", dm_device_name(md)); in init_rq_based_worker_thread()
2607 static int dm_init_request_based_queue(struct mapped_device *md) in dm_init_request_based_queue() argument
2612 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); in dm_init_request_based_queue()
2617 md->seq_rq_merge_deadline_usecs = 0; in dm_init_request_based_queue()
2619 md->queue = q; in dm_init_request_based_queue()
2620 dm_init_old_md_queue(md); in dm_init_request_based_queue()
2621 blk_queue_softirq_done(md->queue, dm_softirq_done); in dm_init_request_based_queue()
2622 blk_queue_prep_rq(md->queue, dm_prep_fn); in dm_init_request_based_queue()
2624 init_rq_based_worker_thread(md); in dm_init_request_based_queue()
2626 elv_register_queue(md->queue); in dm_init_request_based_queue()
2635 struct mapped_device *md = data; in dm_mq_init_request() local
2642 tio->md = md; in dm_mq_init_request()
2652 struct mapped_device *md = tio->md; in dm_mq_queue_rq() local
2654 struct dm_table *map = dm_get_live_table(md, &srcu_idx); in dm_mq_queue_rq()
2665 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
2671 dm_start_request(md, rq); in dm_mq_queue_rq()
2674 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
2679 dm_start_request(md, rq); in dm_mq_queue_rq()
2682 init_tio(tio, rq, md); in dm_mq_queue_rq()
2694 (void) clone_rq(rq, md, tio, GFP_ATOMIC); in dm_mq_queue_rq()
2695 queue_kthread_work(&md->kworker, &tio->work); in dm_mq_queue_rq()
2698 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { in dm_mq_queue_rq()
2700 rq_end_stats(md, rq); in dm_mq_queue_rq()
2701 rq_completed(md, rq_data_dir(rq), false); in dm_mq_queue_rq()
2716 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) in dm_init_request_based_blk_mq_queue() argument
2718 unsigned md_type = dm_get_md_type(md); in dm_init_request_based_blk_mq_queue()
2722 memset(&md->tag_set, 0, sizeof(md->tag_set)); in dm_init_request_based_blk_mq_queue()
2723 md->tag_set.ops = &dm_mq_ops; in dm_init_request_based_blk_mq_queue()
2724 md->tag_set.queue_depth = BLKDEV_MAX_RQ; in dm_init_request_based_blk_mq_queue()
2725 md->tag_set.numa_node = NUMA_NO_NODE; in dm_init_request_based_blk_mq_queue()
2726 md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in dm_init_request_based_blk_mq_queue()
2727 md->tag_set.nr_hw_queues = 1; in dm_init_request_based_blk_mq_queue()
2730 md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request); in dm_init_request_based_blk_mq_queue()
2732 md->tag_set.cmd_size = sizeof(struct dm_rq_target_io); in dm_init_request_based_blk_mq_queue()
2733 md->tag_set.driver_data = md; in dm_init_request_based_blk_mq_queue()
2735 err = blk_mq_alloc_tag_set(&md->tag_set); in dm_init_request_based_blk_mq_queue()
2739 q = blk_mq_init_allocated_queue(&md->tag_set, md->queue); in dm_init_request_based_blk_mq_queue()
2744 md->queue = q; in dm_init_request_based_blk_mq_queue()
2745 dm_init_md_queue(md); in dm_init_request_based_blk_mq_queue()
2748 blk_mq_register_disk(md->disk); in dm_init_request_based_blk_mq_queue()
2751 init_rq_based_worker_thread(md); in dm_init_request_based_blk_mq_queue()
2756 blk_mq_free_tag_set(&md->tag_set); in dm_init_request_based_blk_mq_queue()
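
With blk-mq, the cmd_size set above reserves per-request payload, so the dm_rq_target_io is carved out of the request itself rather than allocated from a mempool. A minimal sketch of the lookup, assuming the standard helper:

static struct dm_rq_target_io *example_tio_from_rq(struct request *rq)
{
	/* the tag set's cmd_size bytes live directly after the request */
	return blk_mq_rq_to_pdu(rq);
}
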
2760 static unsigned filter_md_type(unsigned type, struct mapped_device *md) in filter_md_type() argument
2765 return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; in filter_md_type()
2771 int dm_setup_md_queue(struct mapped_device *md) in dm_setup_md_queue() argument
2774 unsigned md_type = filter_md_type(dm_get_md_type(md), md); in dm_setup_md_queue()
2778 r = dm_init_request_based_queue(md); in dm_setup_md_queue()
2785 r = dm_init_request_based_blk_mq_queue(md); in dm_setup_md_queue()
2792 dm_init_old_md_queue(md); in dm_setup_md_queue()
2793 blk_queue_make_request(md->queue, dm_make_request); in dm_setup_md_queue()
2798 bioset_free(md->queue->bio_split); in dm_setup_md_queue()
2799 md->queue->bio_split = NULL; in dm_setup_md_queue()
2808 struct mapped_device *md; in dm_get_md() local
2816 md = idr_find(&_minor_idr, minor); in dm_get_md()
2817 if (md) { in dm_get_md()
2818 if ((md == MINOR_ALLOCED || in dm_get_md()
2819 (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2820 dm_deleting_md(md) || in dm_get_md()
2821 test_bit(DMF_FREEING, &md->flags))) { in dm_get_md()
2822 md = NULL; in dm_get_md()
2825 dm_get(md); in dm_get_md()
2831 return md; in dm_get_md()
2835 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2837 return md->interface_ptr; in dm_get_mdptr()
2840 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2842 md->interface_ptr = ptr; in dm_set_mdptr()
2845 void dm_get(struct mapped_device *md) in dm_get() argument
2847 atomic_inc(&md->holders); in dm_get()
2848 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2851 int dm_hold(struct mapped_device *md) in dm_hold() argument
2854 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2858 dm_get(md); in dm_hold()
2864 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2866 return md->name; in dm_device_name()
2870 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2878 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2879 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2882 if (dm_request_based(md) && md->kworker_task) in __dm_destroy()
2883 flush_kthread_worker(&md->kworker); in __dm_destroy()
2889 mutex_lock(&md->suspend_lock); in __dm_destroy()
2890 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2891 if (!dm_suspended_md(md)) { in __dm_destroy()
2896 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2897 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2906 while (atomic_read(&md->holders)) in __dm_destroy()
2908 else if (atomic_read(&md->holders)) in __dm_destroy()
2910 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2912 dm_sysfs_exit(md); in __dm_destroy()
2913 dm_table_destroy(__unbind(md)); in __dm_destroy()
2914 free_dev(md); in __dm_destroy()
2917 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2919 __dm_destroy(md, true); in dm_destroy()
2922 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2924 __dm_destroy(md, false); in dm_destroy_immediate()
2927 void dm_put(struct mapped_device *md) in dm_put() argument
2929 atomic_dec(&md->holders); in dm_put()
2933 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) in dm_wait_for_completion() argument
2938 add_wait_queue(&md->wait, &wait); in dm_wait_for_completion()
2943 if (!md_in_flight(md)) in dm_wait_for_completion()
2956 remove_wait_queue(&md->wait, &wait); in dm_wait_for_completion()
2966 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work() local
2972 map = dm_get_live_table(md, &srcu_idx); in dm_wq_work()
2974 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2975 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2976 c = bio_list_pop(&md->deferred); in dm_wq_work()
2977 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2982 if (dm_request_based(md)) in dm_wq_work()
2985 __split_and_process_bio(md, map, c); in dm_wq_work()
2988 dm_put_live_table(md, srcu_idx); in dm_wq_work()
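
A hedged reconstruction of the consumer loop above with the elided control flow filled in; request-based devices resubmit deferred bios straight down the stack, bio-based ones go back through the splitter:

static void example_process_deferred(struct mapped_device *md)
{
	struct bio *c;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);	/* pass straight down */
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}
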
2991 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2993 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2995 queue_work(md->wq, &md->work); in dm_queue_flush()
3001 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
3007 mutex_lock(&md->suspend_lock); in dm_swap_table()
3010 if (!dm_suspended_md(md)) in dm_swap_table()
3020 live_map = dm_get_live_table_fast(md); in dm_swap_table()
3022 limits = md->queue->limits; in dm_swap_table()
3023 dm_put_live_table_fast(md); in dm_swap_table()
3034 map = __bind(md, table, &limits); in dm_swap_table()
3037 mutex_unlock(&md->suspend_lock); in dm_swap_table()
3045 static int lock_fs(struct mapped_device *md) in lock_fs() argument
3049 WARN_ON(md->frozen_sb); in lock_fs()
3051 md->frozen_sb = freeze_bdev(md->bdev); in lock_fs()
3052 if (IS_ERR(md->frozen_sb)) { in lock_fs()
3053 r = PTR_ERR(md->frozen_sb); in lock_fs()
3054 md->frozen_sb = NULL; in lock_fs()
3058 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
3063 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
3065 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
3068 thaw_bdev(md->bdev, md->frozen_sb); in unlock_fs()
3069 md->frozen_sb = NULL; in unlock_fs()
3070 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
3080 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
3092 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
3107 r = lock_fs(md); in __dm_suspend()
3126 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
3128 synchronize_srcu(&md->io_barrier); in __dm_suspend()
3134 if (dm_request_based(md)) { in __dm_suspend()
3135 stop_queue(md->queue); in __dm_suspend()
3136 if (md->kworker_task) in __dm_suspend()
3137 flush_kthread_worker(&md->kworker); in __dm_suspend()
3140 flush_workqueue(md->wq); in __dm_suspend()
3147 r = dm_wait_for_completion(md, interruptible); in __dm_suspend()
3150 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
3152 synchronize_srcu(&md->io_barrier); in __dm_suspend()
3156 dm_queue_flush(md); in __dm_suspend()
3158 if (dm_request_based(md)) in __dm_suspend()
3159 start_queue(md->queue); in __dm_suspend()
3161 unlock_fs(md); in __dm_suspend()
3185 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend() argument
3191 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
3193 if (dm_suspended_md(md)) { in dm_suspend()
3198 if (dm_suspended_internally_md(md)) { in dm_suspend()
3200 mutex_unlock(&md->suspend_lock); in dm_suspend()
3201 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
3207 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
3209 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); in dm_suspend()
3213 set_bit(DMF_SUSPENDED, &md->flags); in dm_suspend()
3218 mutex_unlock(&md->suspend_lock); in dm_suspend()
3222 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
3230 dm_queue_flush(md); in __dm_resume()
3237 if (dm_request_based(md)) in __dm_resume()
3238 start_queue(md->queue); in __dm_resume()
3240 unlock_fs(md); in __dm_resume()
3245 int dm_resume(struct mapped_device *md) in dm_resume() argument
3251 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
3253 if (!dm_suspended_md(md)) in dm_resume()
3256 if (dm_suspended_internally_md(md)) { in dm_resume()
3258 mutex_unlock(&md->suspend_lock); in dm_resume()
3259 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
3265 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
3269 r = __dm_resume(md, map); in dm_resume()
3273 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
3277 mutex_unlock(&md->suspend_lock); in dm_resume()
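
Caller-side sketch of the suspend/swap/resume protocol implied above; dm_swap_table() requires a suspended device and hands back the previous live table for the caller to destroy (example_* name and the simplified error handling are assumptions):

static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, t);	/* returns the table being replaced */
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}
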
3288 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend() argument
3292 if (md->internal_suspend_count++) in __dm_internal_suspend()
3295 if (dm_suspended_md(md)) { in __dm_internal_suspend()
3296 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
3300 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
3308 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); in __dm_internal_suspend()
3310 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
3315 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
3317 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
3319 if (--md->internal_suspend_count) in __dm_internal_resume()
3322 if (dm_suspended_md(md)) in __dm_internal_resume()
3329 (void) __dm_resume(md, NULL); in __dm_internal_resume()
3332 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
3334 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
3337 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
3339 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
3340 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
3341 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
3345 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
3347 mutex_lock(&md->suspend_lock); in dm_internal_resume()
3348 __dm_internal_resume(md); in dm_internal_resume()
3349 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
3358 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
3360 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
3361 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
3364 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
3365 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
3366 flush_workqueue(md->wq); in dm_internal_suspend_fast()
3367 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
3371 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
3373 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
3376 dm_queue_flush(md); in dm_internal_resume_fast()
3379 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
3386 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
3393 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
3397 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
3402 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
3404 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
3407 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
3409 return atomic_read(&md->event_nr); in dm_get_event_nr()
3412 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
3414 return wait_event_interruptible(md->eventq, in dm_wait_event()
3415 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
3418 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
3422 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
3423 list_add(elist, &md->uevent_list); in dm_uevent_add()
3424 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
3431 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
3433 return md->disk; in dm_disk()
3437 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
3439 return &md->kobj_holder.kobj; in dm_kobject()
3444 struct mapped_device *md; in dm_get_from_kobject() local
3446 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
3448 if (test_bit(DMF_FREEING, &md->flags) || in dm_get_from_kobject()
3449 dm_deleting_md(md)) in dm_get_from_kobject()
3452 dm_get(md); in dm_get_from_kobject()
3453 return md; in dm_get_from_kobject()
3456 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
3458 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
3461 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
3463 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
3466 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
3468 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
3483 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, in dm_alloc_md_mempools() argument
3494 type = filter_md_type(type, md); in dm_alloc_md_mempools()
3558 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_register() local
3564 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); in dm_pr_register()
3574 dm_put_live_table(md, srcu_idx); in dm_pr_register()
3581 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
3587 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); in dm_pr_reserve()
3597 dm_put_live_table(md, srcu_idx); in dm_pr_reserve()
3603 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
3609 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); in dm_pr_release()
3619 dm_put_live_table(md, srcu_idx); in dm_pr_release()
3626 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
3632 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); in dm_pr_preempt()
3642 dm_put_live_table(md, srcu_idx); in dm_pr_preempt()
3648 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3654 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); in dm_pr_clear()
3664 dm_put_live_table(md, srcu_idx); in dm_pr_clear()
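
All five dm_pr_* handlers above share one shape: resolve the single underlying device through the live table, forward the persistent-reservation call, then drop the SRCU reference. A hedged sketch of that shape using dm_pr_register's signature (example_* name is hypothetical):

static int example_pr_forward(struct block_device *bdev, u64 old_key,
			      u64 new_key, u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	struct dm_target *tgt;
	fmode_t mode;
	int srcu_idx, r;

	/* on success, bdev now points at the underlying device */
	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_register)
		r = ops->pr_register(bdev, old_key, new_key, flags);
	else
		r = -EOPNOTSUPP;

	dm_put_live_table(md, srcu_idx);
	return r;
}
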