Lines matching refs:hctx (references to the hardware-queue context, struct blk_mq_hw_ctx *hctx, in the blk-mq core, block/blk-mq.c). Each entry gives the source line number, the source line, and the enclosing function; the trailing argument/local/member tag is the cross-referencer's classification of the symbol on that line. A short, hypothetical driver-side sketch built on these helpers follows the listing.
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
44 for (i = 0; i < hctx->ctx_map.size; i++) in blk_mq_hctx_has_pending()
45 if (hctx->ctx_map.map[i].word) in blk_mq_hctx_has_pending()
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, in get_bm() argument
54 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; in get_bm()
57 #define CTX_TO_BIT(hctx, ctx) \ argument
58 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
66 struct blk_align_bitmap *bm = get_bm(hctx, ctx); in blk_mq_hctx_mark_pending()
68 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) in blk_mq_hctx_mark_pending()
69 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); in blk_mq_hctx_mark_pending()
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
75 struct blk_align_bitmap *bm = get_bm(hctx, ctx); in blk_mq_hctx_clear_pending()
77 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); in blk_mq_hctx_clear_pending()
161 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
164 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
165 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
166 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
176 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue() argument
178 return blk_mq_has_free_tags(hctx->tags); in blk_mq_can_queue()
238 rq = data->hctx->tags->rqs[tag]; in __blk_mq_alloc_request()
240 if (blk_mq_tag_busy(data->hctx)) { in __blk_mq_alloc_request()
242 atomic_inc(&data->hctx->nr_active); in __blk_mq_alloc_request()
257 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_request() local
267 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
269 reserved, ctx, hctx); in blk_mq_alloc_request()
273 __blk_mq_run_hw_queue(hctx); in blk_mq_alloc_request()
277 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
279 hctx); in blk_mq_alloc_request()
292 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_free_request() argument
299 atomic_dec(&hctx->nr_active); in __blk_mq_free_request()
303 blk_mq_put_tag(hctx, tag, &ctx->last_tag); in __blk_mq_free_request()
307 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in blk_mq_free_hctx_request() argument
312 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_free_hctx_request()
319 struct blk_mq_hw_ctx *hctx; in blk_mq_free_request() local
322 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
323 blk_mq_free_hctx_request(hctx, rq); in blk_mq_free_request()
627 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired() argument
662 struct blk_mq_hw_ctx *hctx; in blk_mq_rq_timer() local
665 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
670 if (!blk_mq_hw_queue_mapped(hctx)) in blk_mq_rq_timer()
673 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); in blk_mq_rq_timer()
680 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
682 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_rq_timer()
683 blk_mq_tag_idle(hctx); in blk_mq_rq_timer()
731 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in flush_busy_ctxs() argument
736 for (i = 0; i < hctx->ctx_map.size; i++) { in flush_busy_ctxs()
737 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; in flush_busy_ctxs()
744 off = i * hctx->ctx_map.bits_per_word; in flush_busy_ctxs()
750 ctx = hctx->ctxs[bit + off]; in flush_busy_ctxs()
767 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue() argument
769 struct request_queue *q = hctx->queue; in __blk_mq_run_hw_queue()
776 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); in __blk_mq_run_hw_queue()
778 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) in __blk_mq_run_hw_queue()
781 hctx->run++; in __blk_mq_run_hw_queue()
786 flush_busy_ctxs(hctx, &rq_list); in __blk_mq_run_hw_queue()
792 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_run_hw_queue()
793 spin_lock(&hctx->lock); in __blk_mq_run_hw_queue()
794 if (!list_empty(&hctx->dispatch)) in __blk_mq_run_hw_queue()
795 list_splice_init(&hctx->dispatch, &rq_list); in __blk_mq_run_hw_queue()
796 spin_unlock(&hctx->lock); in __blk_mq_run_hw_queue()
820 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
849 hctx->dispatched[0]++; in __blk_mq_run_hw_queue()
851 hctx->dispatched[ilog2(queued) + 1]++; in __blk_mq_run_hw_queue()
858 spin_lock(&hctx->lock); in __blk_mq_run_hw_queue()
859 list_splice(&rq_list, &hctx->dispatch); in __blk_mq_run_hw_queue()
860 spin_unlock(&hctx->lock); in __blk_mq_run_hw_queue()
870 blk_mq_run_hw_queue(hctx, true); in __blk_mq_run_hw_queue()
880 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
882 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
885 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
886 int cpu = hctx->next_cpu, next_cpu; in blk_mq_hctx_next_cpu()
888 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); in blk_mq_hctx_next_cpu()
890 next_cpu = cpumask_first(hctx->cpumask); in blk_mq_hctx_next_cpu()
892 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
893 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
898 return hctx->next_cpu; in blk_mq_hctx_next_cpu()
901 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
903 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) || in blk_mq_run_hw_queue()
904 !blk_mq_hw_queue_mapped(hctx))) in blk_mq_run_hw_queue()
909 if (cpumask_test_cpu(cpu, hctx->cpumask)) { in blk_mq_run_hw_queue()
910 __blk_mq_run_hw_queue(hctx); in blk_mq_run_hw_queue()
918 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), in blk_mq_run_hw_queue()
919 &hctx->run_work, 0); in blk_mq_run_hw_queue()
924 struct blk_mq_hw_ctx *hctx; in blk_mq_run_hw_queues() local
927 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
928 if ((!blk_mq_hctx_has_pending(hctx) && in blk_mq_run_hw_queues()
929 list_empty_careful(&hctx->dispatch)) || in blk_mq_run_hw_queues()
930 test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_run_hw_queues()
933 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
938 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
940 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
941 cancel_delayed_work(&hctx->delay_work); in blk_mq_stop_hw_queue()
942 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
948 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
951 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
952 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
956 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
958 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
960 blk_mq_run_hw_queue(hctx, false); in blk_mq_start_hw_queue()
966 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
969 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
970 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
976 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
979 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_start_stopped_hw_queues()
980 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_start_stopped_hw_queues()
983 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queues()
984 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queues()
991 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn() local
993 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
995 __blk_mq_run_hw_queue(hctx); in blk_mq_run_work_fn()
1000 struct blk_mq_hw_ctx *hctx; in blk_mq_delay_work_fn() local
1002 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); in blk_mq_delay_work_fn()
1004 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_delay_work_fn()
1005 __blk_mq_run_hw_queue(hctx); in blk_mq_delay_work_fn()
1008 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_queue() argument
1010 if (unlikely(!blk_mq_hw_queue_mapped(hctx))) in blk_mq_delay_queue()
1013 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), in blk_mq_delay_queue()
1014 &hctx->delay_work, msecs_to_jiffies(msecs)); in blk_mq_delay_queue()
1018 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_request() argument
1023 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_request()
1030 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
1037 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_request() local
1044 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1047 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_insert_request()
1051 blk_mq_run_hw_queue(hctx, async); in blk_mq_insert_request()
1063 struct blk_mq_hw_ctx *hctx; in blk_mq_insert_requests() local
1072 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1085 __blk_mq_insert_request(hctx, rq, false); in blk_mq_insert_requests()
1089 blk_mq_run_hw_queue(hctx, from_schedule); in blk_mq_insert_requests()
1158 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) in hctx_allow_merges() argument
1160 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && in hctx_allow_merges()
1161 !blk_queue_nomerges(hctx->queue); in hctx_allow_merges()
1164 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, in blk_mq_merge_queue_io() argument
1168 if (!hctx_allow_merges(hctx)) { in blk_mq_merge_queue_io()
1172 __blk_mq_insert_request(hctx, rq, false); in blk_mq_merge_queue_io()
1176 struct request_queue *q = hctx->queue; in blk_mq_merge_queue_io()
1185 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_merge_queue_io()
1191 struct blk_mq_hw_ctx *hctx; member
1199 struct blk_mq_hw_ctx *hctx; in blk_mq_map_request() local
1211 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1218 hctx); in blk_mq_map_request()
1221 __blk_mq_run_hw_queue(hctx); in blk_mq_map_request()
1226 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1228 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); in blk_mq_map_request()
1231 hctx = alloc_data.hctx; in blk_mq_map_request()
1234 hctx->queued++; in blk_mq_map_request()
1235 data->hctx = hctx; in blk_mq_map_request()
1274 if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { in blk_mq_make_request()
1289 ret = q->mq_ops->queue_rq(data.hctx, &bd); in blk_mq_make_request()
1303 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_mq_make_request()
1311 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); in blk_mq_make_request()
1378 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_sq_make_request()
1386 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); in blk_sq_make_request()
1545 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) in blk_mq_hctx_cpu_offline() argument
1547 struct request_queue *q = hctx->queue; in blk_mq_hctx_cpu_offline()
1559 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_cpu_offline()
1577 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_hctx_cpu_offline()
1578 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_hctx_cpu_offline()
1582 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_cpu_offline()
1590 struct blk_mq_hw_ctx *hctx = data; in blk_mq_hctx_notify() local
1593 return blk_mq_hctx_cpu_offline(hctx, cpu); in blk_mq_hctx_notify()
1606 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
1610 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
1614 hctx->fq->flush_rq, hctx_idx, in blk_mq_exit_hctx()
1618 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
1620 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); in blk_mq_exit_hctx()
1621 blk_free_flush_queue(hctx->fq); in blk_mq_exit_hctx()
1622 blk_mq_free_bitmap(&hctx->ctx_map); in blk_mq_exit_hctx()
1628 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
1631 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
1634 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
1641 struct blk_mq_hw_ctx *hctx; in blk_mq_free_hw_queues() local
1644 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_free_hw_queues()
1645 free_cpumask_var(hctx->cpumask); in blk_mq_free_hw_queues()
1650 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
1655 node = hctx->numa_node; in blk_mq_init_hctx()
1657 node = hctx->numa_node = set->numa_node; in blk_mq_init_hctx()
1659 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_init_hctx()
1660 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); in blk_mq_init_hctx()
1661 spin_lock_init(&hctx->lock); in blk_mq_init_hctx()
1662 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_init_hctx()
1663 hctx->queue = q; in blk_mq_init_hctx()
1664 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
1665 hctx->flags = set->flags; in blk_mq_init_hctx()
1667 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, in blk_mq_init_hctx()
1668 blk_mq_hctx_notify, hctx); in blk_mq_init_hctx()
1669 blk_mq_register_cpu_notifier(&hctx->cpu_notifier); in blk_mq_init_hctx()
1671 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
1677 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), in blk_mq_init_hctx()
1679 if (!hctx->ctxs) in blk_mq_init_hctx()
1682 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) in blk_mq_init_hctx()
1685 hctx->nr_ctx = 0; in blk_mq_init_hctx()
1688 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
1691 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
1692 if (!hctx->fq) in blk_mq_init_hctx()
1697 hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
1704 kfree(hctx->fq); in blk_mq_init_hctx()
1707 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
1709 blk_mq_free_bitmap(&hctx->ctx_map); in blk_mq_init_hctx()
1711 kfree(hctx->ctxs); in blk_mq_init_hctx()
1713 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); in blk_mq_init_hctx()
1721 struct blk_mq_hw_ctx *hctx; in blk_mq_init_hw_queues() local
1727 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_hw_queues()
1728 if (blk_mq_init_hctx(q, set, hctx, i)) in blk_mq_init_hw_queues()
1750 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
1762 hctx = q->mq_ops->map_queue(q, i); in blk_mq_init_cpu_queues()
1768 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
1769 hctx->numa_node = cpu_to_node(i); in blk_mq_init_cpu_queues()
1776 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
1780 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1781 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
1782 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
1793 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1794 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
1795 ctx->index_hw = hctx->nr_ctx; in blk_mq_map_swqueue()
1796 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
1799 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1800 struct blk_mq_ctxmap *map = &hctx->ctx_map; in blk_mq_map_swqueue()
1806 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
1811 hctx->tags = NULL; in blk_mq_map_swqueue()
1818 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
1819 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
1826 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word); in blk_mq_map_swqueue()
1831 hctx->next_cpu = cpumask_first(hctx->cpumask); in blk_mq_map_swqueue()
1832 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
1838 struct blk_mq_hw_ctx *hctx; in blk_mq_update_tag_set_depth() local
1851 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_tag_set_depth()
1853 hctx->flags |= BLK_MQ_F_TAG_SHARED; in blk_mq_update_tag_set_depth()
1855 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; in blk_mq_update_tag_set_depth()
1890 struct blk_mq_hw_ctx *hctx; in blk_mq_release() local
1894 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
1895 if (!hctx) in blk_mq_release()
1897 kfree(hctx->ctxs); in blk_mq_release()
1898 kfree(hctx); in blk_mq_release()
2249 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
2256 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2257 ret = blk_mq_tag_update_depth(hctx->tags, nr); in blk_mq_update_nr_requests()