References to q in block/blk.h (Linux block layer internal header)
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
        struct blk_mq_hw_ctx *hctx;

        if (!q->mq_ops)
                return q->fq;   /* legacy path: one flush queue per queue */

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        return hctx->fq;        /* blk-mq: flush queue of the mapped hctx */
}
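ctx is only used to map to a blk-mq hardware context, so legacy callers pass NULL. A minimal sketch of such a call (flush_in_flight is an illustrative local, not from this header):

        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
        bool flush_in_flight = fq->flush_pending_idx != fq->flush_running_idx;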

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);  /* take a reference on the queue's kobject */
}
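The double-underscore form skips any liveness check; the public pair is blk_get_queue()/blk_put_queue(). A hedged sketch of the usual pattern (use_queue() is hypothetical):

        if (blk_get_queue(q)) {         /* fails once the queue is dying */
                use_queue(q);           /* hypothetical work while referenced */
                blk_put_queue(q);
        }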

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);
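blk_queue_bypass_start()/blk_queue_bypass_end() bracket sections in which requests must bypass the elevator, e.g. while an I/O scheduler is being switched. A sketch of the bracketing (reconfigure_elevator() is hypothetical):

        blk_queue_bypass_start(q);      /* drain, then force bypass mode */
        reconfigure_elevator(q);        /* hypothetical: work done in bypass */
        blk_queue_bypass_end(q);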

static inline void blk_queue_enter_live(struct request_queue *q)
{
        /*
         * The caller is known to hold a queue usage reference already, so
         * the queue cannot be frozen or torn down under us; taking another
         * reference on q_usage_counter cannot fail.
         */
        percpu_ref_get(&q->q_usage_counter);
}
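blk_queue_enter_live() is the unchecked fast path; blk_queue_enter()/blk_queue_exit() form the checked pair that can fail while the queue is frozen or dying. A sketch, assuming the gfp_t-taking signature of this kernel generation (do_io() is hypothetical):

        if (blk_queue_enter(q, GFP_KERNEL))
                return;                 /* queue is frozen or going away */
        do_io(q);                       /* hypothetical work under the usage ref */
        blk_queue_exit(q);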

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                unsigned int *request_count,
                struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        while (1) {
                if (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        return rq;
                }
                /* A flush is in flight and the drive can't queue flushes:
                 * hold the queue instead of dispatching more requests. */
                if (fq->flush_pending_idx != fq->flush_running_idx &&
                    !queue_flush_queueable(q)) {
                        fq->flush_queue_delayed = 1;
                        return NULL;
                }
                if (unlikely(blk_queue_bypass(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}
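__elv_next_request() is not called by drivers directly; legacy request_fn drivers reach it through blk_peek_request()/blk_fetch_request(). A minimal sketch of that consumer loop (demo_request_fn() and handle_rq() are hypothetical):

static void demo_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* runs with q->queue_lock held, as request_fn callbacks do */
        while ((rq = blk_fetch_request(q)) != NULL) {
                handle_rq(rq);                  /* hypothetical processing */
                __blk_end_request_all(rq, 0);   /* complete with success */
        }
}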

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_activate_req_fn)
                e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_deactivate_req_fn)
                e->type->ops.elevator_deactivate_req_fn(q, rq);
}
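Both wrappers test the hook first because activate/deactivate notifications are optional for an I/O scheduler. A sketch of an elevator_type wiring them up (all demo_* names hypothetical):

static struct elevator_type demo_iosched = {
        .ops = {
                .elevator_dispatch_fn           = demo_dispatch,
                .elevator_activate_req_fn       = demo_activate_rq,
                .elevator_deactivate_req_fn     = demo_deactivate_rq,
        },
        .elevator_name = "demo",
};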

static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;       /* stub when CONFIG_FAIL_IO_TIMEOUT is disabled */
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                struct request *next);
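The ll_* helpers only validate segment limits for a proposed merge; the bio_attempt_* functions above both validate and perform it. A simplified sketch of a submit-side fallback chain (alloc_new_request() is hypothetical; the real placement logic lives in the plug-merge path):

        if (bio_attempt_back_merge(q, rq, bio))
                return;                         /* appended at rq's tail */
        if (bio_attempt_front_merge(q, rq, bio))
                return;                         /* prepended at rq's head */
        alloc_new_request(q, bio);              /* hypothetical fallback */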

void blk_queue_congestion_threshold(struct request_queue *q);

static inline int queue_congestion_on_threshold(struct request_queue *q)
{
        return q->nr_congestion_on;
}

static inline int queue_congestion_off_threshold(struct request_queue *q)
{
        return q->nr_congestion_off;
}
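The asymmetric thresholds give hysteresis: congestion is signalled once allocated requests reach the on threshold and cleared only after they fall below the off threshold, which avoids flapping near the limit. A sketch of the comparison (set_congested()/clear_congested() are stand-in names for the core's helpers):

        if (rl->count[sync] >= queue_congestion_on_threshold(q))
                set_congested(q, sync);         /* hypothetical name */
        else if (rl->count[sync] < queue_congestion_off_threshold(q))
                clear_congested(q, sync);       /* hypothetical name */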

struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
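An icq ties one io_context to one request_queue. Callers typically look up first and create on a miss; ioc_create_icq() resolves the lookup/create race itself. A hedged sketch (locking elided; ioc_lookup_icq() expects q->queue_lock held):

        struct io_cq *icq = ioc_lookup_icq(ioc, q);

        if (!icq)
                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);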

#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
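The inline stubs let core code call the throttling hooks unconditionally; with CONFIG_BLK_DEV_THROTTLING off they compile to nothing. A sketch of the pairing across queue setup and teardown (the fail label is hypothetical):

        if (blk_throtl_init(q))
                goto fail;              /* hypothetical allocation error path */
        /* ... queue is live ... */
        blk_throtl_drain(q);            /* release throttled bios at teardown */
        blk_throtl_exit(q);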