Lines matching refs: q
34 struct request_queue *q, struct blk_mq_ctx *ctx) in blk_get_flush_queue() argument
38 if (!q->mq_ops) in blk_get_flush_queue()
39 return q->fq; in blk_get_flush_queue()
41 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_get_flush_queue()
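The matches above appear to come from the block layer's private header, block/blk.h, on an older kernel where blk-mq still exposed the mq_ops->map_queue hook. A minimal sketch of the whole helper, filling in the lines the matches omit (the start of the signature and the final return are assumed):

    static inline struct blk_flush_queue *
    blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
    {
        struct blk_mq_hw_ctx *hctx;

        /* Legacy (non-blk-mq) queues keep a single flush queue on q. */
        if (!q->mq_ops)
            return q->fq;

        /* blk-mq: map the software context's CPU to its hardware context. */
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        /* Assumed: each hardware context carries its own flush queue. */
        return hctx->fq;
    }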
46 static inline void __blk_get_queue(struct request_queue *q) in __blk_get_queue() argument
48 kobject_get(&q->kobj); in __blk_get_queue()
51 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
53 void blk_free_flush_queue(struct blk_flush_queue *q);
55 int blk_init_rl(struct request_list *rl, struct request_queue *q,
59 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
61 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
63 void blk_queue_bypass_start(struct request_queue *q);
64 void blk_queue_bypass_end(struct request_queue *q);
66 void __blk_queue_free_tags(struct request_queue *q);
76 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
78 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
80 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
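The bio_attempt_*_merge() prototypes above are truncated by the match; the missing final parameter is the bio being merged. A hypothetical caller (the wrapper and its name are illustration only, not from the listing) showing how the back and front merge attempts pair up:

    static bool try_bio_merge(struct request_queue *q, struct request *req,
                              struct bio *bio)
    {
        /* Prefer appending the new bio behind the existing request ... */
        if (bio_attempt_back_merge(q, req, bio))
            return true;

        /* ... otherwise try to prepend it in front of it. */
        return bio_attempt_front_merge(q, req, bio);
    }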
116 static inline struct request *__elv_next_request(struct request_queue *q) in __elv_next_request() argument
119 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in __elv_next_request()
122 if (!list_empty(&q->queue_head)) { in __elv_next_request()
123 rq = list_entry_rq(q->queue_head.next); in __elv_next_request()
143 !queue_flush_queueable(q)) { in __elv_next_request()
147 if (unlikely(blk_queue_bypass(q)) || in __elv_next_request()
148 !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) in __elv_next_request()
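A sketch of the dispatch loop these matches come from, assuming the usual structure of this helper in that era; the surrounding while loop and the flush-index check paired with queue_flush_queueable() are assumptions, since the matches show only isolated lines:

    static inline struct request *__elv_next_request(struct request_queue *q)
    {
        struct request *rq;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        while (1) {
            /* Fast path: something already sits on the dispatch list. */
            if (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                return rq;
            }

            /*
             * A flush is in progress and the device cannot queue flushes:
             * hold back further dispatch until the flush completes.
             */
            if (fq->flush_pending_idx != fq->flush_running_idx &&
                !queue_flush_queueable(q)) {
                fq->flush_queue_delayed = 1;
                return NULL;
            }

            /* Bypass mode, or the elevator has nothing left to dispatch. */
            if (unlikely(blk_queue_bypass(q)) ||
                !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                return NULL;
        }
    }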
153 static inline void elv_activate_rq(struct request_queue *q, struct request *rq) in elv_activate_rq() argument
155 struct elevator_queue *e = q->elevator; in elv_activate_rq()
158 e->type->ops.elevator_activate_req_fn(q, rq); in elv_activate_rq()
161 static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) in elv_deactivate_rq() argument
163 struct elevator_queue *e = q->elevator; in elv_deactivate_rq()
166 e->type->ops.elevator_deactivate_req_fn(q, rq); in elv_deactivate_rq()
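The matches omit the guard around these calls; assuming the usual pattern, the hooks are optional and are only invoked when the scheduler provides them:

    static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
    {
        struct elevator_queue *e = q->elevator;

        /* Optional hook: only schedulers that track dispatched requests set it. */
        if (e->type->ops.elevator_activate_req_fn)
            e->type->ops.elevator_activate_req_fn(q, rq);
    }

elv_deactivate_rq() presumably mirrors this with elevator_deactivate_req_fn.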
175 static inline int blk_should_fake_timeout(struct request_queue *q) in blk_should_fake_timeout() argument
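This static inline looks like the stub used when I/O-timeout fault injection is compiled out; a sketch of the assumed surrounding #ifdef:

    #ifdef CONFIG_FAIL_IO_TIMEOUT
    int blk_should_fake_timeout(struct request_queue *q);
    #else
    static inline int blk_should_fake_timeout(struct request_queue *q)
    {
        return 0;    /* fault injection disabled: never fake a timeout */
    }
    #endif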
181 int ll_back_merge_fn(struct request_queue *q, struct request *req,
183 int ll_front_merge_fn(struct request_queue *q, struct request *req,
185 int attempt_back_merge(struct request_queue *q, struct request *rq);
186 int attempt_front_merge(struct request_queue *q, struct request *rq);
187 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
194 void blk_queue_congestion_threshold(struct request_queue *q);
196 void __blk_run_queue_uncond(struct request_queue *q);
206 static inline int queue_congestion_on_threshold(struct request_queue *q) in queue_congestion_on_threshold() argument
208 return q->nr_congestion_on; in queue_congestion_on_threshold()
214 static inline int queue_congestion_off_threshold(struct request_queue *q) in queue_congestion_off_threshold() argument
216 return q->nr_congestion_off; in queue_congestion_off_threshold()
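These two getters exist as a pair so callers can apply hysteresis: the congestion-on threshold sits above the congestion-off threshold, so a queue that just became congested does not immediately flip back. A hypothetical caller sketch (the helper and its name are illustration only):

    static void update_congestion(struct request_queue *q, int nr_in_flight)
    {
        if (nr_in_flight >= queue_congestion_on_threshold(q)) {
            /* mark the queue's backing device congested */
        } else if (nr_in_flight < queue_congestion_off_threshold(q)) {
            /* clear the congested state */
        }
    }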
239 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
240 struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
242 void ioc_clear_queue(struct request_queue *q);
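A hedged sketch of the usual lookup-then-create pattern for an io_cq, assuming the truncated final parameter of ioc_create_icq() is a gfp_t allocation mask; locking requirements around the queue lock are glossed over, and the wrapper name is hypothetical:

    static struct io_cq *get_or_create_icq(struct io_context *ioc,
                                           struct request_queue *q,
                                           gfp_t gfp_mask)
    {
        struct io_cq *icq;

        icq = ioc_lookup_icq(ioc, q);            /* fast path: already linked */
        if (!icq)
            icq = ioc_create_icq(ioc, q, gfp_mask);  /* may fail and return NULL */
        return icq;
    }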
270 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
271 extern void blk_throtl_drain(struct request_queue *q);
272 extern int blk_throtl_init(struct request_queue *q);
273 extern void blk_throtl_exit(struct request_queue *q);
275 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) in blk_throtl_bio() argument
279 static inline void blk_throtl_drain(struct request_queue *q) { } in blk_throtl_drain() argument
280 static inline int blk_throtl_init(struct request_queue *q) { return 0; } in blk_throtl_init() argument
281 static inline void blk_throtl_exit(struct request_queue *q) { } in blk_throtl_exit() argument
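The extern prototypes and the no-op static inline stubs above presumably bracket a CONFIG_BLK_DEV_THROTTLING #ifdef, roughly:

    #ifdef CONFIG_BLK_DEV_THROTTLING
    extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
    extern void blk_throtl_drain(struct request_queue *q);
    extern int blk_throtl_init(struct request_queue *q);
    extern void blk_throtl_exit(struct request_queue *q);
    #else /* CONFIG_BLK_DEV_THROTTLING */
    static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
    {
        return false;    /* nothing throttled: let the bio proceed */
    }
    static inline void blk_throtl_drain(struct request_queue *q) { }
    static inline int blk_throtl_init(struct request_queue *q) { return 0; }
    static inline void blk_throtl_exit(struct request_queue *q) { }
    #endif /* CONFIG_BLK_DEV_THROTTLING */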