Lines matching refs: q  (all hits are in block/blk-mq.c)

81 void blk_mq_freeze_queue_start(struct request_queue *q) in blk_mq_freeze_queue_start() argument
85 freeze_depth = atomic_inc_return(&q->mq_freeze_depth); in blk_mq_freeze_queue_start()
87 percpu_ref_kill(&q->q_usage_counter); in blk_mq_freeze_queue_start()
88 blk_mq_run_hw_queues(q, false); in blk_mq_freeze_queue_start()
93 static void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait() argument
95 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); in blk_mq_freeze_queue_wait()
102 void blk_freeze_queue(struct request_queue *q) in blk_freeze_queue() argument
111 blk_mq_freeze_queue_start(q); in blk_freeze_queue()
112 blk_mq_freeze_queue_wait(q); in blk_freeze_queue()
115 void blk_mq_freeze_queue(struct request_queue *q) in blk_mq_freeze_queue() argument
121 blk_freeze_queue(q); in blk_mq_freeze_queue()
125 void blk_mq_unfreeze_queue(struct request_queue *q) in blk_mq_unfreeze_queue() argument
129 freeze_depth = atomic_dec_return(&q->mq_freeze_depth); in blk_mq_unfreeze_queue()
132 percpu_ref_reinit(&q->q_usage_counter); in blk_mq_unfreeze_queue()
133 wake_up_all(&q->mq_freeze_wq); in blk_mq_unfreeze_queue()
138 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters() argument
143 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
152 wake_up_all(&q->mq_freeze_wq); in blk_mq_wake_waiters()
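
The freeze machinery above (lines 81-152) is normally consumed through the blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair at lines 115 and 125. A minimal sketch of the usual calling pattern, with the work done under the freeze left as a placeholder:

/* Minimal sketch, not taken from the listing: blk_mq_freeze_queue() drains
 * q_usage_counter to zero before returning, so nothing is in flight in the
 * middle section. */
static void example_reconfigure(struct request_queue *q)
{
        blk_mq_freeze_queue(q);         /* blk_freeze_queue(): start + wait */

        /* ... safely change queue state while no requests are in flight ... */

        blk_mq_unfreeze_queue(q);       /* percpu_ref_reinit() + wake_up_all() */
}
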
161 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, in blk_mq_rq_ctx_init() argument
164 if (blk_queue_io_stat(q)) in blk_mq_rq_ctx_init()
169 rq->q = q; in blk_mq_rq_ctx_init()
225 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); in __blk_mq_alloc_request()
232 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, in blk_mq_alloc_request() argument
241 ret = blk_queue_enter(q, gfp); in blk_mq_alloc_request()
245 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
246 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
247 blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_DIRECT_RECLAIM, in blk_mq_alloc_request()
255 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
256 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
257 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, in blk_mq_alloc_request()
264 blk_queue_exit(q); in blk_mq_alloc_request()
275 struct request_queue *q = rq->q; in __blk_mq_free_request() local
283 blk_queue_exit(q); in __blk_mq_free_request()
299 struct request_queue *q = rq->q; in blk_mq_free_request() local
301 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
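
For reference, a hedged sketch of the caller side of the allocation path at line 232. The trailing "reserved" argument and the ERR_PTR() return convention are assumptions based on this kernel generation; the listing does not show the full signature.

static int example_alloc_and_free(struct request_queue *q)
{
        struct request *rq;

        /* rw = READ, no reserved tag; both values are illustrative only */
        rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... fill in and use the request ... */

        blk_mq_free_request(rq);        /* drops the tag and blk_queue_exit()s */
        return 0;
}
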
332 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
341 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in blk_mq_ipi_complete_request()
342 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
347 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in blk_mq_ipi_complete_request()
356 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
363 struct request_queue *q = rq->q; in __blk_mq_complete_request() local
365 if (!q->softirq_done_fn) in __blk_mq_complete_request()
381 struct request_queue *q = rq->q; in blk_mq_complete_request() local
383 if (unlikely(blk_should_fake_timeout(q))) in blk_mq_complete_request()
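
The completion path above (lines 332-383) is what a driver reaches from its interrupt handler: blk_mq_complete_request() routes the request to q->softirq_done_fn, bouncing it to the submitting CPU by IPI unless QUEUE_FLAG_SAME_COMP says otherwise (line 341). A sketch of that driver side; the lookup helper is hypothetical and the second (error) argument is assumed for this kernel generation.

static irqreturn_t example_irq_handler(int irq, void *data)
{
        /* example_lookup_completed() is a hypothetical driver helper that maps
         * the hardware completion back to its struct request. */
        struct request *rq = example_lookup_completed(data);

        blk_mq_complete_request(rq, 0);         /* 0 = no error */
        return IRQ_HANDLED;
}
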
400 struct request_queue *q = rq->q; in blk_mq_start_request() local
402 trace_block_rq_issue(q, rq); in blk_mq_start_request()
427 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
440 struct request_queue *q = rq->q; in __blk_mq_requeue_request() local
442 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
445 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
461 struct request_queue *q = in blk_mq_requeue_work() local
467 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_requeue_work()
468 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_requeue_work()
469 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_requeue_work()
490 blk_mq_start_hw_queues(q); in blk_mq_requeue_work()
495 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list() local
504 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
507 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
509 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
511 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
515 void blk_mq_cancel_requeue_work(struct request_queue *q) in blk_mq_cancel_requeue_work() argument
517 cancel_work_sync(&q->requeue_work); in blk_mq_cancel_requeue_work()
521 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list() argument
523 kblockd_schedule_work(&q->requeue_work); in blk_mq_kick_requeue_list()
527 void blk_mq_abort_requeue_list(struct request_queue *q) in blk_mq_abort_requeue_list() argument
532 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
533 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_abort_requeue_list()
534 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
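
The requeue list handled above (lines 440-534) is fed by drivers when a request has to be retried later. A sketch of the usual sequence: blk_mq_requeue_request() is the public wrapper around __blk_mq_requeue_request() at line 440 (the wrapper itself does not reference q, so it is absent from the listing), and the at_head flag is inferred from the list_add()/list_add_tail() branch at lines 507/509, so treat the exact signature as an assumption.

static void example_retry_later(struct request *rq)
{
        blk_mq_requeue_request(rq);             /* undo "started" state (see line 440) */
        blk_mq_add_to_requeue_list(rq, false);  /* false = queue at the tail */
        blk_mq_kick_requeue_list(rq->q);        /* schedules blk_mq_requeue_work() */
}
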
560 struct blk_mq_ops *ops = req->q->mq_ops; in blk_mq_rq_timed_out()
604 if (unlikely(blk_queue_dying(rq->q))) in blk_mq_check_expired()
622 struct request_queue *q = (struct request_queue *)priv; in blk_mq_rq_timer() local
629 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); in blk_mq_rq_timer()
633 mod_timer(&q->timeout, data.next); in blk_mq_rq_timer()
637 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
650 static bool blk_mq_attempt_merge(struct request_queue *q, in blk_mq_attempt_merge() argument
667 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
673 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
726 struct request_queue *q = hctx->queue; in __blk_mq_run_hw_queue() local
777 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
879 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues() argument
884 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
903 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues() argument
908 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
921 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues() argument
926 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
931 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues() argument
936 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_start_stopped_hw_queues()
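
A sketch of how the stop/start helpers above (lines 879-936) are typically used from a driver: stop the hardware queues when the device runs out of resources, then restart the stopped ones once completions free something up. Passing async = true simply defers the re-run to kblockd.

static void example_device_busy(struct request_queue *q)
{
        blk_mq_stop_hw_queues(q);                       /* pause every hctx */
}

static void example_device_ready(struct request_queue *q)
{
        blk_mq_start_stopped_hw_queues(q, true);        /* clear stopped + async run */
}
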
1000 struct request_queue *q = rq->q; in blk_mq_insert_request() local
1004 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_request()
1008 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1020 static void blk_mq_insert_requests(struct request_queue *q, in blk_mq_insert_requests() argument
1030 trace_block_unplug(q, depth, !from_schedule); in blk_mq_insert_requests()
1032 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_requests()
1036 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1088 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1097 this_q = rq->q; in blk_mq_flush_plug_list()
1141 struct request_queue *q = hctx->queue; in blk_mq_merge_queue_io() local
1144 if (!blk_mq_attempt_merge(q, ctx, bio)) { in blk_mq_merge_queue_io()
1160 static struct request *blk_mq_map_request(struct request_queue *q, in blk_mq_map_request() argument
1170 blk_queue_enter_live(q); in blk_mq_map_request()
1171 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1172 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1177 trace_block_getrq(q, bio, rw); in blk_mq_map_request()
1178 blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, in blk_mq_map_request()
1184 trace_block_sleeprq(q, bio, rw); in blk_mq_map_request()
1186 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1187 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1188 blk_mq_set_alloc_data(&alloc_data, q, in blk_mq_map_request()
1204 struct request_queue *q = rq->q; in blk_mq_direct_issue_request() local
1205 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, in blk_mq_direct_issue_request()
1219 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_direct_issue_request()
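
q->mq_ops->queue_rq, invoked at lines 777 and 1219, is the driver's submission hook. A minimal sketch of its shape; example_hw_submit() is a hypothetical stand-in for the real hardware submission, and the BLK_MQ_RQ_QUEUE_* return codes are the standard ones for this kernel generation.

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* see line 400: traces + arms the timeout */

        if (!example_hw_submit(hctx->driver_data, rq))
                return BLK_MQ_RQ_QUEUE_BUSY;    /* core puts the rq back and retries later */

        return BLK_MQ_RQ_QUEUE_OK;
}
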
1242 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) in blk_mq_make_request() argument
1253 blk_queue_bounce(q, &bio); in blk_mq_make_request()
1260 blk_queue_split(q, &bio, q->bio_split); in blk_mq_make_request()
1262 if (!is_flush_fua && !blk_queue_nomerges(q)) { in blk_mq_make_request()
1263 if (blk_attempt_plug_merge(q, bio, &request_count, in blk_mq_make_request()
1267 request_count = blk_plug_queued_count(q); in blk_mq_make_request()
1269 rq = blk_mq_map_request(q, bio, &data); in blk_mq_make_request()
1287 if (((plug && !blk_queue_nomerges(q)) || is_sync) && in blk_mq_make_request()
1339 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) in blk_sq_make_request() argument
1349 blk_queue_bounce(q, &bio); in blk_sq_make_request()
1356 blk_queue_split(q, &bio, q->bio_split); in blk_sq_make_request()
1358 if (!is_flush_fua && !blk_queue_nomerges(q) && in blk_sq_make_request()
1359 blk_attempt_plug_merge(q, bio, &request_count, NULL)) in blk_sq_make_request()
1362 rq = blk_mq_map_request(q, bio, &data); in blk_sq_make_request()
1383 trace_block_plug(q); in blk_sq_make_request()
1389 trace_block_plug(q); in blk_sq_make_request()
1414 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) in blk_mq_map_queue() argument
1416 return q->queue_hw_ctx[q->mq_map[cpu]]; in blk_mq_map_queue()
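
blk_mq_map_queue() at line 1414 is the entire CPU-to-hardware-queue mapping in this version: q->mq_map[] holds one hctx index per possible CPU. Callers always go through the mq_ops hook, as the listing shows throughout; a trivial sketch:

static struct blk_mq_hw_ctx *example_hctx_for_cpu(struct request_queue *q,
                                                  int cpu)
{
        /* normally resolves to blk_mq_map_queue() */
        return q->mq_ops->map_queue(q, cpu);
}
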
1575 struct request_queue *q = hctx->queue; in blk_mq_hctx_cpu_offline() local
1582 ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_hctx_cpu_offline()
1594 ctx = blk_mq_get_ctx(q); in blk_mq_hctx_cpu_offline()
1605 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_hctx_cpu_offline()
1632 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx() argument
1653 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues() argument
1659 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
1662 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
1666 static void blk_mq_free_hw_queues(struct request_queue *q, in blk_mq_free_hw_queues() argument
1672 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_free_hw_queues()
1676 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx() argument
1691 hctx->queue = q; in blk_mq_init_hctx()
1719 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
1746 static int blk_mq_init_hw_queues(struct request_queue *q, in blk_mq_init_hw_queues() argument
1755 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_hw_queues()
1756 if (blk_mq_init_hctx(q, set, hctx, i)) in blk_mq_init_hw_queues()
1760 if (i == q->nr_hw_queues) in blk_mq_init_hw_queues()
1766 blk_mq_exit_hw_queues(q, set, i); in blk_mq_init_hw_queues()
1771 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues() argument
1777 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
1784 __ctx->queue = q; in blk_mq_init_cpu_queues()
1790 hctx = q->mq_ops->map_queue(q, i); in blk_mq_init_cpu_queues()
1801 static void blk_mq_map_swqueue(struct request_queue *q, in blk_mq_map_swqueue() argument
1807 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_map_swqueue()
1812 mutex_lock(&q->sysfs_lock); in blk_mq_map_swqueue()
1814 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1822 queue_for_each_ctx(q, ctx, i) { in blk_mq_map_swqueue()
1827 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1833 mutex_unlock(&q->sysfs_lock); in blk_mq_map_swqueue()
1835 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1871 queue_for_each_ctx(q, ctx, i) { in blk_mq_map_swqueue()
1875 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1880 static void queue_set_hctx_shared(struct request_queue *q, bool shared) in queue_set_hctx_shared() argument
1885 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
1895 struct request_queue *q; in blk_mq_update_tag_set_depth() local
1897 list_for_each_entry(q, &set->tag_list, tag_set_list) { in blk_mq_update_tag_set_depth()
1898 blk_mq_freeze_queue(q); in blk_mq_update_tag_set_depth()
1899 queue_set_hctx_shared(q, shared); in blk_mq_update_tag_set_depth()
1900 blk_mq_unfreeze_queue(q); in blk_mq_update_tag_set_depth()
1904 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set() argument
1906 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_del_queue_tag_set()
1909 list_del_init(&q->tag_set_list); in blk_mq_del_queue_tag_set()
1920 struct request_queue *q) in blk_mq_add_queue_tag_set() argument
1922 q->tag_set = set; in blk_mq_add_queue_tag_set()
1933 queue_set_hctx_shared(q, true); in blk_mq_add_queue_tag_set()
1934 list_add_tail(&q->tag_set_list, &set->tag_list); in blk_mq_add_queue_tag_set()
1945 void blk_mq_release(struct request_queue *q) in blk_mq_release() argument
1951 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
1958 kfree(q->mq_map); in blk_mq_release()
1959 q->mq_map = NULL; in blk_mq_release()
1961 kfree(q->queue_hw_ctx); in blk_mq_release()
1964 free_percpu(q->queue_ctx); in blk_mq_release()
1969 struct request_queue *uninit_q, *q; in blk_mq_init_queue() local
1975 q = blk_mq_init_allocated_queue(set, uninit_q); in blk_mq_init_queue()
1976 if (IS_ERR(q)) in blk_mq_init_queue()
1979 return q; in blk_mq_init_queue()
1984 struct request_queue *q) in blk_mq_init_allocated_queue() argument
2022 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); in blk_mq_init_allocated_queue()
2023 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); in blk_mq_init_allocated_queue()
2025 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
2026 q->nr_hw_queues = set->nr_hw_queues; in blk_mq_init_allocated_queue()
2027 q->mq_map = map; in blk_mq_init_allocated_queue()
2029 q->queue_ctx = ctx; in blk_mq_init_allocated_queue()
2030 q->queue_hw_ctx = hctxs; in blk_mq_init_allocated_queue()
2032 q->mq_ops = set->ops; in blk_mq_init_allocated_queue()
2033 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; in blk_mq_init_allocated_queue()
2036 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; in blk_mq_init_allocated_queue()
2038 q->sg_reserved_size = INT_MAX; in blk_mq_init_allocated_queue()
2040 INIT_WORK(&q->requeue_work, blk_mq_requeue_work); in blk_mq_init_allocated_queue()
2041 INIT_LIST_HEAD(&q->requeue_list); in blk_mq_init_allocated_queue()
2042 spin_lock_init(&q->requeue_lock); in blk_mq_init_allocated_queue()
2044 if (q->nr_hw_queues > 1) in blk_mq_init_allocated_queue()
2045 blk_queue_make_request(q, blk_mq_make_request); in blk_mq_init_allocated_queue()
2047 blk_queue_make_request(q, blk_sq_make_request); in blk_mq_init_allocated_queue()
2052 q->nr_requests = set->queue_depth; in blk_mq_init_allocated_queue()
2055 blk_queue_softirq_done(q, set->ops->complete); in blk_mq_init_allocated_queue()
2057 blk_mq_init_cpu_queues(q, set->nr_hw_queues); in blk_mq_init_allocated_queue()
2059 if (blk_mq_init_hw_queues(q, set)) in blk_mq_init_allocated_queue()
2065 list_add_tail(&q->all_q_node, &all_q_list); in blk_mq_init_allocated_queue()
2066 blk_mq_add_queue_tag_set(set, q); in blk_mq_init_allocated_queue()
2067 blk_mq_map_swqueue(q, cpu_online_mask); in blk_mq_init_allocated_queue()
2072 return q; in blk_mq_init_allocated_queue()
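
blk_mq_init_queue() at line 1969 and blk_mq_init_allocated_queue() at line 1984 are the entry points a driver reaches after setting up a tag set. A sketch of that setup; blk_mq_alloc_tag_set()/blk_mq_free_tag_set() and the field names are standard for this kernel generation but do not appear in the listing, and example_mq_ops is hypothetical.

static struct request_queue *example_create_queue(struct blk_mq_tag_set *set)
{
        struct request_queue *q;
        int ret;

        memset(set, 0, sizeof(*set));
        set->ops = &example_mq_ops;     /* .queue_rq, .map_queue, .complete, ... */
        set->nr_hw_queues = 1;          /* single hw queue -> blk_sq_make_request */
        set->queue_depth = 64;
        set->numa_node = NUMA_NO_NODE;

        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ERR_PTR(ret);

        q = blk_mq_init_queue(set);     /* allocates, then blk_mq_init_allocated_queue() */
        if (IS_ERR(q))
                blk_mq_free_tag_set(set);
        return q;
}
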
2090 void blk_mq_free_queue(struct request_queue *q) in blk_mq_free_queue() argument
2092 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_free_queue()
2095 list_del_init(&q->all_q_node); in blk_mq_free_queue()
2098 blk_mq_del_queue_tag_set(q); in blk_mq_free_queue()
2100 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); in blk_mq_free_queue()
2101 blk_mq_free_hw_queues(q, set); in blk_mq_free_queue()
2105 static void blk_mq_queue_reinit(struct request_queue *q, in blk_mq_queue_reinit() argument
2108 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); in blk_mq_queue_reinit()
2110 blk_mq_sysfs_unregister(q); in blk_mq_queue_reinit()
2112 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); in blk_mq_queue_reinit()
2120 blk_mq_map_swqueue(q, online_mask); in blk_mq_queue_reinit()
2122 blk_mq_sysfs_register(q); in blk_mq_queue_reinit()
2128 struct request_queue *q; in blk_mq_queue_reinit_notify() local
2176 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2177 blk_mq_freeze_queue_start(q); in blk_mq_queue_reinit_notify()
2178 list_for_each_entry(q, &all_q_list, all_q_node) { in blk_mq_queue_reinit_notify()
2179 blk_mq_freeze_queue_wait(q); in blk_mq_queue_reinit_notify()
2185 del_timer_sync(&q->timeout); in blk_mq_queue_reinit_notify()
2188 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2189 blk_mq_queue_reinit(q, &online_new); in blk_mq_queue_reinit_notify()
2191 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2192 blk_mq_unfreeze_queue(q); in blk_mq_queue_reinit_notify()
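
The hotplug notifier above (lines 2128-2192) freezes every queue before reiniting any of them, and it splits the freeze into a start pass and a wait pass so the drains overlap instead of running serially. A sketch of just that pattern, phrased inside blk-mq.c since blk_mq_freeze_queue_wait() is static (line 93); the array form of the queue list is only for illustration.

static void example_freeze_all(struct request_queue **queues, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                blk_mq_freeze_queue_start(queues[i]);   /* kill all usage counters first */
        for (i = 0; i < nr; i++)
                blk_mq_freeze_queue_wait(queues[i]);    /* then wait for each drain */

        /* ... remap software queues, then blk_mq_unfreeze_queue() each one ... */
}
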
2328 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests() argument
2330 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_update_nr_requests()
2338 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2345 q->nr_requests = nr; in blk_mq_update_nr_requests()
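
blk_mq_update_nr_requests() at line 2328 is what a write to queue/nr_requests in sysfs ends up calling for an mq queue; a hypothetical direct caller would only need a sanity check before passing the new depth through.

static int example_set_queue_depth(struct request_queue *q, unsigned int nr)
{
        if (!nr)
                return -EINVAL;
        return blk_mq_update_nr_requests(q, nr);        /* resizes tags on every hctx */
}
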