Lines matching references to q (block/blk-core.c)

75 if (rl == &rl->q->root_rl) in blk_clear_congested()
76 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_clear_congested()
86 if (rl == &rl->q->root_rl) in blk_set_congested()
87 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_set_congested()
91 void blk_queue_congestion_threshold(struct request_queue *q) in blk_queue_congestion_threshold() argument
95 nr = q->nr_requests - (q->nr_requests / 8) + 1; in blk_queue_congestion_threshold()
96 if (nr > q->nr_requests) in blk_queue_congestion_threshold()
97 nr = q->nr_requests; in blk_queue_congestion_threshold()
98 q->nr_congestion_on = nr; in blk_queue_congestion_threshold()
100 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; in blk_queue_congestion_threshold()
103 q->nr_congestion_off = nr; in blk_queue_congestion_threshold()
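
The two thresholds above implement simple hysteresis around nr_requests: the queue is marked congested once allocations reach roughly 7/8 of the pool, and only uncongested again a bit lower, around 13/16. A minimal standalone sketch of the same arithmetic, with a worked example for nr_requests = 128 (the struct and names here are stand-ins, not the kernel's):

#include <stdio.h>

/* Hypothetical stand-in for the two fields derived in
 * blk_queue_congestion_threshold(); not the kernel struct. */
struct thresholds {
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
};

static struct thresholds congestion_thresholds(unsigned int nr_requests)
{
	struct thresholds t;
	unsigned int nr;

	/* congested once allocated requests reach ~7/8 of the pool (+1) */
	nr = nr_requests - (nr_requests / 8) + 1;
	if (nr > nr_requests)
		nr = nr_requests;
	t.nr_congestion_on = nr;

	/* cleared again only below ~13/16 (-1), giving hysteresis;
	 * floor it at one request for tiny pools */
	nr = nr_requests - (nr_requests / 8) - (nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	t.nr_congestion_off = nr;
	return t;
}

int main(void)
{
	struct thresholds t = congestion_thresholds(128);
	/* prints on=113 off=103 for a pool of 128 requests */
	printf("on=%u off=%u\n", t.nr_congestion_on, t.nr_congestion_off);
	return 0;
}
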
116 struct request_queue *q = bdev_get_queue(bdev); in blk_get_backing_dev_info() local
118 return &q->backing_dev_info; in blk_get_backing_dev_info()
122 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
129 rq->q = q; in blk_rq_init()
183 struct request_queue *q; in blk_delay_work() local
185 q = container_of(work, struct request_queue, delay_work.work); in blk_delay_work()
186 spin_lock_irq(q->queue_lock); in blk_delay_work()
187 __blk_run_queue(q); in blk_delay_work()
188 spin_unlock_irq(q->queue_lock); in blk_delay_work()
201 void blk_delay_queue(struct request_queue *q, unsigned long msecs) in blk_delay_queue() argument
203 if (likely(!blk_queue_dead(q))) in blk_delay_queue()
204 queue_delayed_work(kblockd_workqueue, &q->delay_work, in blk_delay_queue()
218 void blk_start_queue_async(struct request_queue *q) in blk_start_queue_async() argument
220 queue_flag_clear(QUEUE_FLAG_STOPPED, q); in blk_start_queue_async()
221 blk_run_queue_async(q); in blk_start_queue_async()
234 void blk_start_queue(struct request_queue *q) in blk_start_queue() argument
238 queue_flag_clear(QUEUE_FLAG_STOPPED, q); in blk_start_queue()
239 __blk_run_queue(q); in blk_start_queue()
257 void blk_stop_queue(struct request_queue *q) in blk_stop_queue() argument
259 cancel_delayed_work(&q->delay_work); in blk_stop_queue()
260 queue_flag_set(QUEUE_FLAG_STOPPED, q); in blk_stop_queue()
282 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
284 del_timer_sync(&q->timeout); in blk_sync_queue()
286 if (q->mq_ops) { in blk_sync_queue()
290 queue_for_each_hw_ctx(q, hctx, i) { in blk_sync_queue()
295 cancel_delayed_work_sync(&q->delay_work); in blk_sync_queue()
311 inline void __blk_run_queue_uncond(struct request_queue *q) in __blk_run_queue_uncond() argument
313 if (unlikely(blk_queue_dead(q))) in __blk_run_queue_uncond()
323 q->request_fn_active++; in __blk_run_queue_uncond()
324 q->request_fn(q); in __blk_run_queue_uncond()
325 q->request_fn_active--; in __blk_run_queue_uncond()
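
Note the request_fn_active window around the ->request_fn call above: it lets the drain logic further down detect that the driver is still inside its request function even when queue_head is momentarily empty. A rough userspace model of that accounting, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Toy queue modelling only the fields this pattern needs. */
struct toy_queue {
	bool dead;
	int request_fn_active;		/* nested request_fn invocations */
	void (*request_fn)(struct toy_queue *q);
};

static void run_queue_uncond(struct toy_queue *q)
{
	if (q->dead)			/* mirrors the blk_queue_dead() bail-out */
		return;

	q->request_fn_active++;		/* visible to a drain loop */
	q->request_fn(q);
	q->request_fn_active--;
}

static void toy_request_fn(struct toy_queue *q)
{
	printf("request_fn running, active=%d\n", q->request_fn_active);
}

int main(void)
{
	struct toy_queue q = { .request_fn = toy_request_fn };

	run_queue_uncond(&q);		/* prints active=1 */
	q.dead = true;
	run_queue_uncond(&q);		/* skipped: queue marked dead */
	return 0;
}
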
337 void __blk_run_queue(struct request_queue *q) in __blk_run_queue() argument
339 if (unlikely(blk_queue_stopped(q))) in __blk_run_queue()
342 __blk_run_queue_uncond(q); in __blk_run_queue()
354 void blk_run_queue_async(struct request_queue *q) in blk_run_queue_async() argument
356 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) in blk_run_queue_async()
357 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); in blk_run_queue_async()
369 void blk_run_queue(struct request_queue *q) in blk_run_queue() argument
373 spin_lock_irqsave(q->queue_lock, flags); in blk_run_queue()
374 __blk_run_queue(q); in blk_run_queue()
375 spin_unlock_irqrestore(q->queue_lock, flags); in blk_run_queue()
379 void blk_put_queue(struct request_queue *q) in blk_put_queue() argument
381 kobject_put(&q->kobj); in blk_put_queue()
394 static void __blk_drain_queue(struct request_queue *q, bool drain_all) in __blk_drain_queue() argument
395 __releases(q->queue_lock) in __blk_drain_queue()
396 __acquires(q->queue_lock) in __blk_drain_queue()
400 lockdep_assert_held(q->queue_lock); in __blk_drain_queue()
409 if (q->elevator) in __blk_drain_queue()
410 elv_drain_elevator(q); in __blk_drain_queue()
412 blkcg_drain_queue(q); in __blk_drain_queue()
421 if (!list_empty(&q->queue_head) && q->request_fn) in __blk_drain_queue()
422 __blk_run_queue(q); in __blk_drain_queue()
424 drain |= q->nr_rqs_elvpriv; in __blk_drain_queue()
425 drain |= q->request_fn_active; in __blk_drain_queue()
433 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in __blk_drain_queue()
434 drain |= !list_empty(&q->queue_head); in __blk_drain_queue()
436 drain |= q->nr_rqs[i]; in __blk_drain_queue()
437 drain |= q->in_flight[i]; in __blk_drain_queue()
446 spin_unlock_irq(q->queue_lock); in __blk_drain_queue()
450 spin_lock_irq(q->queue_lock); in __blk_drain_queue()
458 if (q->request_fn) { in __blk_drain_queue()
461 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
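
The drain loop above keeps kicking the queue and re-evaluating a set of "anything still outstanding?" counters until all of them reach zero, dropping the queue lock between passes so completions can make progress. A single-threaded sketch of just the convergence test (counters are plain ints here, and the lock/sleep steps are only hinted at in comments):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the counters __blk_drain_queue() inspects. */
struct drain_state {
	int nr_rqs[2];		/* allocated requests, sync/async */
	int in_flight[2];	/* dispatched to the driver */
	int nr_rqs_elvpriv;	/* requests holding elevator data */
	int request_fn_active;	/* driver currently inside request_fn */
};

static bool still_draining(const struct drain_state *s)
{
	int drain = 0, i;

	drain |= s->nr_rqs_elvpriv;
	drain |= s->request_fn_active;
	for (i = 0; i < 2; i++) {
		drain |= s->nr_rqs[i];
		drain |= s->in_flight[i];
	}
	return drain != 0;
}

int main(void)
{
	struct drain_state s = { .nr_rqs = { 2, 1 }, .in_flight = { 1, 0 } };
	int pass = 0;

	while (still_draining(&s)) {
		/* real code: __blk_run_queue(), unlock, msleep(10), relock */
		printf("pass %d: still busy\n", pass++);
		s.nr_rqs[0] = s.nr_rqs[1] = 0;	/* pretend completions landed */
		s.in_flight[0] = 0;
	}
	printf("drained after %d pass(es)\n", pass);
	return 0;
}
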
477 void blk_queue_bypass_start(struct request_queue *q) in blk_queue_bypass_start() argument
479 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
480 q->bypass_depth++; in blk_queue_bypass_start()
481 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_start()
482 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
489 if (blk_queue_init_done(q)) { in blk_queue_bypass_start()
490 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
491 __blk_drain_queue(q, false); in blk_queue_bypass_start()
492 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
506 void blk_queue_bypass_end(struct request_queue *q) in blk_queue_bypass_end() argument
508 spin_lock_irq(q->queue_lock); in blk_queue_bypass_end()
509 if (!--q->bypass_depth) in blk_queue_bypass_end()
510 queue_flag_clear(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_end()
511 WARN_ON_ONCE(q->bypass_depth < 0); in blk_queue_bypass_end()
512 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_end()
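
Bypass mode nests: every blk_queue_bypass_start() bumps bypass_depth, and only the matching final blk_queue_bypass_end() clears the BYPASS flag. A minimal sketch of that depth-counted flag, with made-up names:

#include <assert.h>
#include <stdbool.h>

/* Toy model of the nested bypass counter; not the kernel structures. */
struct toy_queue {
	int bypass_depth;
	bool bypass;		/* stands in for QUEUE_FLAG_BYPASS */
};

static void bypass_start(struct toy_queue *q)
{
	q->bypass_depth++;
	q->bypass = true;
	/* the real function also drains the queue once init is done */
}

static void bypass_end(struct toy_queue *q)
{
	if (!--q->bypass_depth)
		q->bypass = false;
	assert(q->bypass_depth >= 0);	/* mirrors the WARN_ON_ONCE() */
}

int main(void)
{
	struct toy_queue q = { 0 };

	bypass_start(&q);
	bypass_start(&q);	/* nested caller */
	bypass_end(&q);		/* still bypassing: depth is 1 */
	assert(q.bypass);
	bypass_end(&q);		/* last user gone, flag cleared */
	assert(!q.bypass);
	return 0;
}
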
516 void blk_set_queue_dying(struct request_queue *q) in blk_set_queue_dying() argument
518 queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); in blk_set_queue_dying()
520 if (q->mq_ops) in blk_set_queue_dying()
521 blk_mq_wake_waiters(q); in blk_set_queue_dying()
525 blk_queue_for_each_rl(rl, q) { in blk_set_queue_dying()
542 void blk_cleanup_queue(struct request_queue *q) in blk_cleanup_queue() argument
544 spinlock_t *lock = q->queue_lock; in blk_cleanup_queue()
547 mutex_lock(&q->sysfs_lock); in blk_cleanup_queue()
548 blk_set_queue_dying(q); in blk_cleanup_queue()
560 q->bypass_depth++; in blk_cleanup_queue()
561 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_cleanup_queue()
563 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in blk_cleanup_queue()
564 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in blk_cleanup_queue()
565 queue_flag_set(QUEUE_FLAG_DYING, q); in blk_cleanup_queue()
567 mutex_unlock(&q->sysfs_lock); in blk_cleanup_queue()
573 blk_freeze_queue(q); in blk_cleanup_queue()
575 if (!q->mq_ops) in blk_cleanup_queue()
576 __blk_drain_queue(q, true); in blk_cleanup_queue()
577 queue_flag_set(QUEUE_FLAG_DEAD, q); in blk_cleanup_queue()
584 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); in blk_cleanup_queue()
585 blk_sync_queue(q); in blk_cleanup_queue()
587 if (q->mq_ops) in blk_cleanup_queue()
588 blk_mq_free_queue(q); in blk_cleanup_queue()
589 percpu_ref_exit(&q->q_usage_counter); in blk_cleanup_queue()
592 if (q->queue_lock != &q->__queue_lock) in blk_cleanup_queue()
593 q->queue_lock = &q->__queue_lock; in blk_cleanup_queue()
596 bdi_unregister(&q->backing_dev_info); in blk_cleanup_queue()
599 blk_put_queue(q); in blk_cleanup_queue()
615 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
621 rl->q = q; in blk_init_rl()
629 (void *)(long)q->node, gfp_mask, in blk_init_rl()
630 q->node); in blk_init_rl()
649 int blk_queue_enter(struct request_queue *q, gfp_t gfp) in blk_queue_enter() argument
654 if (percpu_ref_tryget_live(&q->q_usage_counter)) in blk_queue_enter()
660 ret = wait_event_interruptible(q->mq_freeze_wq, in blk_queue_enter()
661 !atomic_read(&q->mq_freeze_depth) || in blk_queue_enter()
662 blk_queue_dying(q)); in blk_queue_enter()
663 if (blk_queue_dying(q)) in blk_queue_enter()
670 void blk_queue_exit(struct request_queue *q) in blk_queue_exit() argument
672 percpu_ref_put(&q->q_usage_counter); in blk_queue_exit()
677 struct request_queue *q = in blk_queue_usage_counter_release() local
680 wake_up_all(&q->mq_freeze_wq); in blk_queue_usage_counter_release()
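
blk_queue_enter()/blk_queue_exit() gate every submitter on q_usage_counter: a fast tryget while the queue is live, otherwise sleep until the freeze is lifted or the queue dies, with the release callback waking the freezer. A much-simplified single-threaded model using a plain counter and flags (the percpu-ref and waitqueue machinery is elided, and the -EBUSY return replaces the real wait loop):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the q_usage_counter gate. */
struct toy_queue {
	long usage;		/* outstanding enters */
	bool frozen;		/* freeze depth > 0 in the real code */
	bool dying;
};

static int queue_enter(struct toy_queue *q)
{
	if (!q->frozen && !q->dying) {	/* percpu_ref_tryget_live() fast path */
		q->usage++;
		return 0;
	}
	/* real code: wait_event_interruptible() until unfrozen, or fail
	 * with -ENODEV once the queue is dying */
	return q->dying ? -ENODEV : -EBUSY;
}

static void queue_exit(struct toy_queue *q)
{
	if (--q->usage == 0 && q->frozen)
		printf("last user gone; freezer can proceed\n");
}

int main(void)
{
	struct toy_queue q = { 0 };

	if (queue_enter(&q) == 0) {
		/* ... submit I/O ... */
		q.frozen = true;	/* a freeze begins while we are inside */
		queue_exit(&q);		/* our exit is what lets it complete */
	}
	printf("enter while frozen: %d\n", queue_enter(&q));
	return 0;
}
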
685 struct request_queue *q; in blk_alloc_queue_node() local
688 q = kmem_cache_alloc_node(blk_requestq_cachep, in blk_alloc_queue_node()
690 if (!q) in blk_alloc_queue_node()
693 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); in blk_alloc_queue_node()
694 if (q->id < 0) in blk_alloc_queue_node()
697 q->bio_split = bioset_create(BIO_POOL_SIZE, 0); in blk_alloc_queue_node()
698 if (!q->bio_split) in blk_alloc_queue_node()
701 q->backing_dev_info.ra_pages = in blk_alloc_queue_node()
703 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; in blk_alloc_queue_node()
704 q->backing_dev_info.name = "block"; in blk_alloc_queue_node()
705 q->node = node_id; in blk_alloc_queue_node()
707 err = bdi_init(&q->backing_dev_info); in blk_alloc_queue_node()
711 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, in blk_alloc_queue_node()
712 laptop_mode_timer_fn, (unsigned long) q); in blk_alloc_queue_node()
713 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); in blk_alloc_queue_node()
714 INIT_LIST_HEAD(&q->queue_head); in blk_alloc_queue_node()
715 INIT_LIST_HEAD(&q->timeout_list); in blk_alloc_queue_node()
716 INIT_LIST_HEAD(&q->icq_list); in blk_alloc_queue_node()
718 INIT_LIST_HEAD(&q->blkg_list); in blk_alloc_queue_node()
720 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); in blk_alloc_queue_node()
722 kobject_init(&q->kobj, &blk_queue_ktype); in blk_alloc_queue_node()
724 mutex_init(&q->sysfs_lock); in blk_alloc_queue_node()
725 spin_lock_init(&q->__queue_lock); in blk_alloc_queue_node()
731 q->queue_lock = &q->__queue_lock; in blk_alloc_queue_node()
739 q->bypass_depth = 1; in blk_alloc_queue_node()
740 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); in blk_alloc_queue_node()
742 init_waitqueue_head(&q->mq_freeze_wq); in blk_alloc_queue_node()
748 if (percpu_ref_init(&q->q_usage_counter, in blk_alloc_queue_node()
753 if (blkcg_init_queue(q)) in blk_alloc_queue_node()
756 return q; in blk_alloc_queue_node()
759 percpu_ref_exit(&q->q_usage_counter); in blk_alloc_queue_node()
761 bdi_destroy(&q->backing_dev_info); in blk_alloc_queue_node()
763 bioset_free(q->bio_split); in blk_alloc_queue_node()
765 ida_simple_remove(&blk_queue_ida, q->id); in blk_alloc_queue_node()
767 kmem_cache_free(blk_requestq_cachep, q); in blk_alloc_queue_node()
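
blk_alloc_queue_node() follows the usual kernel pattern of unwinding partial allocations through a chain of labels at the bottom of the function (percpu_ref, bdi, bio_split, ida, then the queue itself), freeing in reverse allocation order. A generic userspace sketch of the same goto-unwind idiom, with invented resources:

#include <stdio.h>
#include <stdlib.h>

/* Invented three-step allocation illustrating the unwind order. */
struct widget {
	int *id_slot;
	char *name;
	double *stats;
};

static struct widget *widget_alloc(void)
{
	struct widget *w = calloc(1, sizeof(*w));
	if (!w)
		return NULL;

	w->id_slot = malloc(sizeof(*w->id_slot));
	if (!w->id_slot)
		goto fail_widget;

	w->name = malloc(32);
	if (!w->name)
		goto fail_id;

	w->stats = malloc(16 * sizeof(*w->stats));
	if (!w->stats)
		goto fail_name;

	return w;

fail_name:			/* undo only what already succeeded */
	free(w->name);
fail_id:
	free(w->id_slot);
fail_widget:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_alloc();

	printf("alloc %s\n", w ? "succeeded" : "failed");
	if (w) {
		free(w->stats);
		free(w->name);
		free(w->id_slot);
		free(w);
	}
	return 0;
}
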
814 struct request_queue *uninit_q, *q; in blk_init_queue_node() local
820 q = blk_init_allocated_queue(uninit_q, rfn, lock); in blk_init_queue_node()
821 if (!q) in blk_init_queue_node()
824 return q; in blk_init_queue_node()
828 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
831 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, in blk_init_allocated_queue() argument
834 if (!q) in blk_init_allocated_queue()
837 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0); in blk_init_allocated_queue()
838 if (!q->fq) in blk_init_allocated_queue()
841 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) in blk_init_allocated_queue()
844 q->request_fn = rfn; in blk_init_allocated_queue()
845 q->prep_rq_fn = NULL; in blk_init_allocated_queue()
846 q->unprep_rq_fn = NULL; in blk_init_allocated_queue()
847 q->queue_flags |= QUEUE_FLAG_DEFAULT; in blk_init_allocated_queue()
851 q->queue_lock = lock; in blk_init_allocated_queue()
856 blk_queue_make_request(q, blk_queue_bio); in blk_init_allocated_queue()
858 q->sg_reserved_size = INT_MAX; in blk_init_allocated_queue()
861 mutex_lock(&q->sysfs_lock); in blk_init_allocated_queue()
864 if (elevator_init(q, NULL)) { in blk_init_allocated_queue()
865 mutex_unlock(&q->sysfs_lock); in blk_init_allocated_queue()
869 mutex_unlock(&q->sysfs_lock); in blk_init_allocated_queue()
871 return q; in blk_init_allocated_queue()
874 blk_free_flush_queue(q->fq); in blk_init_allocated_queue()
879 bool blk_get_queue(struct request_queue *q) in blk_get_queue() argument
881 if (likely(!blk_queue_dying(q))) { in blk_get_queue()
882 __blk_get_queue(q); in blk_get_queue()
893 elv_put_request(rl->q, rq); in blk_free_request()
905 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) in ioc_batching() argument
915 return ioc->nr_batch_requests == q->nr_batching || in ioc_batching()
926 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) in ioc_set_batching() argument
928 if (!ioc || ioc_batching(q, ioc)) in ioc_set_batching()
931 ioc->nr_batch_requests = q->nr_batching; in ioc_set_batching()
937 struct request_queue *q = rl->q; in __freed_request() local
939 if (rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
942 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
956 struct request_queue *q = rl->q; in freed_request() local
959 q->nr_rqs[sync]--; in freed_request()
962 q->nr_rqs_elvpriv--; in freed_request()
970 int blk_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_update_nr_requests() argument
975 spin_lock_irq(q->queue_lock); in blk_update_nr_requests()
976 q->nr_requests = nr; in blk_update_nr_requests()
977 blk_queue_congestion_threshold(q); in blk_update_nr_requests()
978 on_thresh = queue_congestion_on_threshold(q); in blk_update_nr_requests()
979 off_thresh = queue_congestion_off_threshold(q); in blk_update_nr_requests()
981 blk_queue_for_each_rl(rl, q) { in blk_update_nr_requests()
992 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
999 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
1007 spin_unlock_irq(q->queue_lock); in blk_update_nr_requests()
1063 struct request_queue *q = rl->q; in __get_request() local
1065 struct elevator_type *et = q->elevator->type; in __get_request()
1071 if (unlikely(blk_queue_dying(q))) in __get_request()
1074 may_queue = elv_may_queue(q, rw_flags); in __get_request()
1078 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
1079 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
1087 ioc_set_batching(q, ioc); in __get_request()
1091 && !ioc_batching(q, ioc)) { in __get_request()
1109 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
1112 q->nr_rqs[is_sync]++; in __get_request()
1126 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { in __get_request()
1128 q->nr_rqs_elvpriv++; in __get_request()
1130 icq = ioc_lookup_icq(ioc, q); in __get_request()
1133 if (blk_queue_io_stat(q)) in __get_request()
1135 spin_unlock_irq(q->queue_lock); in __get_request()
1142 blk_rq_init(q, rq); in __get_request()
1150 icq = ioc_create_icq(ioc, q, gfp_mask); in __get_request()
1156 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1170 if (ioc_batching(q, ioc)) in __get_request()
1173 trace_block_getrq(q, bio, rw_flags & 1); in __get_request()
1184 __func__, dev_name(q->backing_dev_info.dev)); in __get_request()
1189 spin_lock_irq(q->queue_lock); in __get_request()
1190 q->nr_rqs_elvpriv--; in __get_request()
1191 spin_unlock_irq(q->queue_lock); in __get_request()
1202 spin_lock_irq(q->queue_lock); in __get_request()
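
__get_request() is where the per-queue throttling happens: at the congestion-on threshold the request list is marked full and only "batching" tasks may keep allocating, and at 3/2 of nr_requests allocation stops outright. A compact sketch of just that admission decision (all names invented; the ioc/batching bookkeeping is reduced to a boolean, and the check order is simplified):

#include <stdbool.h>
#include <stdio.h>

enum verdict { ALLOW, ALLOW_AND_MARK_FULL, DENY };

/* Invented summary of the checks __get_request() applies before
 * actually allocating a request. */
static enum verdict may_allocate(unsigned int count, unsigned int nr_requests,
				 unsigned int congestion_on, bool batching)
{
	/* hard ceiling: never exceed 1.5 * nr_requests */
	if (count >= 3 * nr_requests / 2)
		return DENY;

	if (count + 1 >= congestion_on) {
		if (count + 1 >= nr_requests && !batching)
			return DENY;	/* pool exhausted for ordinary tasks */
		return ALLOW_AND_MARK_FULL;	/* set congested/full state */
	}
	return ALLOW;
}

int main(void)
{
	/* nr_requests = 128 gives congestion_on = 113 (see earlier sketch) */
	printf("%d\n", may_allocate(50, 128, 113, false));	/* 0: ALLOW */
	printf("%d\n", may_allocate(120, 128, 113, false));	/* 1: mark full */
	printf("%d\n", may_allocate(130, 128, 113, false));	/* 2: DENY, pool gone */
	printf("%d\n", may_allocate(130, 128, 113, true));	/* 1: batcher may dip in */
	printf("%d\n", may_allocate(200, 128, 113, true));	/* 2: DENY, hard ceiling */
	return 0;
}
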
1232 static struct request *get_request(struct request_queue *q, int rw_flags, in get_request() argument
1240 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1246 if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) { in get_request()
1255 trace_block_sleeprq(q, bio, rw_flags & 1); in get_request()
1257 spin_unlock_irq(q->queue_lock); in get_request()
1265 ioc_set_batching(q, current->io_context); in get_request()
1267 spin_lock_irq(q->queue_lock); in get_request()
1273 static struct request *blk_old_get_request(struct request_queue *q, int rw, in blk_old_get_request() argument
1281 create_io_context(gfp_mask, q->node); in blk_old_get_request()
1283 spin_lock_irq(q->queue_lock); in blk_old_get_request()
1284 rq = get_request(q, rw, NULL, gfp_mask); in blk_old_get_request()
1286 spin_unlock_irq(q->queue_lock); in blk_old_get_request()
1292 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) in blk_get_request() argument
1294 if (q->mq_ops) in blk_get_request()
1295 return blk_mq_alloc_request(q, rw, gfp_mask, false); in blk_get_request()
1297 return blk_old_get_request(q, rw, gfp_mask); in blk_get_request()
1332 struct request *blk_make_request(struct request_queue *q, struct bio *bio, in blk_make_request() argument
1335 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request()
1346 blk_queue_bounce(q, &bounce_bio); in blk_make_request()
1347 ret = blk_rq_append_bio(q, rq, bounce_bio); in blk_make_request()
1383 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1387 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1390 blk_queue_end_tag(q, rq); in blk_requeue_request()
1394 elv_requeue_request(q, rq); in blk_requeue_request()
1398 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1402 __elv_add_request(q, rq, where); in add_acct_request()
1451 if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1452 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1461 void __blk_put_request(struct request_queue *q, struct request *req) in __blk_put_request() argument
1463 if (unlikely(!q)) in __blk_put_request()
1466 if (q->mq_ops) { in __blk_put_request()
1473 elv_completed_request(q, req); in __blk_put_request()
1498 struct request_queue *q = req->q; in blk_put_request() local
1500 if (q->mq_ops) in blk_put_request()
1505 spin_lock_irqsave(q->queue_lock, flags); in blk_put_request()
1506 __blk_put_request(q, req); in blk_put_request()
1507 spin_unlock_irqrestore(q->queue_lock, flags); in blk_put_request()
1543 bool bio_attempt_back_merge(struct request_queue *q, struct request *req, in bio_attempt_back_merge() argument
1548 if (!ll_back_merge_fn(q, req, bio)) in bio_attempt_back_merge()
1551 trace_block_bio_backmerge(q, req, bio); in bio_attempt_back_merge()
1565 bool bio_attempt_front_merge(struct request_queue *q, struct request *req, in bio_attempt_front_merge() argument
1570 if (!ll_front_merge_fn(q, req, bio)) in bio_attempt_front_merge()
1573 trace_block_bio_frontmerge(q, req, bio); in bio_attempt_front_merge()
1611 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1625 if (q->mq_ops) in blk_attempt_plug_merge()
1633 if (rq->q == q) { in blk_attempt_plug_merge()
1644 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1649 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1653 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
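
Plug merging walks the task's plugged requests from newest to oldest and, for each one belonging to the same queue, tries a back merge (the bio starts where the request ends) and then a front merge (the bio ends where the request starts). A standalone sketch of those two contiguity tests on sector ranges (structures invented; the real merge functions also recheck limits and accounting):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins: a request covers [pos, pos + sectors). */
struct toy_req { unsigned long long pos; unsigned int sectors; };
struct toy_bio { unsigned long long sector; unsigned int sectors; };

static bool try_back_merge(struct toy_req *rq, const struct toy_bio *bio)
{
	if (bio->sector != rq->pos + rq->sectors)
		return false;		/* not contiguous at the tail */
	rq->sectors += bio->sectors;	/* grow the request forwards */
	return true;
}

static bool try_front_merge(struct toy_req *rq, const struct toy_bio *bio)
{
	if (bio->sector + bio->sectors != rq->pos)
		return false;		/* not contiguous at the head */
	rq->pos = bio->sector;		/* grow the request backwards */
	rq->sectors += bio->sectors;
	return true;
}

int main(void)
{
	struct toy_req rq = { .pos = 100, .sectors = 8 };
	struct toy_bio tail = { .sector = 108, .sectors = 8 };
	struct toy_bio head = { .sector = 92,  .sectors = 8 };

	printf("back:  %d\n", try_back_merge(&rq, &tail));	/* 1: now 100..116 */
	printf("front: %d\n", try_front_merge(&rq, &head));	/* 1: now 92..116 */
	printf("rq = [%llu, +%u)\n", rq.pos, rq.sectors);	/* [92, +24) */
	return 0;
}
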
1662 unsigned int blk_plug_queued_count(struct request_queue *q) in blk_plug_queued_count() argument
1673 if (q->mq_ops) in blk_plug_queued_count()
1679 if (rq->q == q) in blk_plug_queued_count()
1697 blk_rq_bio_prep(req->q, req, bio); in init_request_from_bio()
1700 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) in blk_queue_bio() argument
1713 blk_queue_bounce(q, &bio); in blk_queue_bio()
1715 blk_queue_split(q, &bio, q->bio_split); in blk_queue_bio()
1724 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1733 if (!blk_queue_nomerges(q)) { in blk_queue_bio()
1734 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) in blk_queue_bio()
1737 request_count = blk_plug_queued_count(q); in blk_queue_bio()
1739 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1741 el_ret = elv_merge(q, &req, bio); in blk_queue_bio()
1743 if (bio_attempt_back_merge(q, req, bio)) { in blk_queue_bio()
1744 elv_bio_merged(q, req, bio); in blk_queue_bio()
1745 if (!attempt_back_merge(q, req)) in blk_queue_bio()
1746 elv_merged_request(q, req, el_ret); in blk_queue_bio()
1750 if (bio_attempt_front_merge(q, req, bio)) { in blk_queue_bio()
1751 elv_bio_merged(q, req, bio); in blk_queue_bio()
1752 if (!attempt_front_merge(q, req)) in blk_queue_bio()
1753 elv_merged_request(q, req, el_ret); in blk_queue_bio()
1772 req = get_request(q, rw_flags, bio, GFP_NOIO); in blk_queue_bio()
1787 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) in blk_queue_bio()
1797 trace_block_plug(q); in blk_queue_bio()
1801 trace_block_plug(q); in blk_queue_bio()
1807 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1808 add_acct_request(q, req, where); in blk_queue_bio()
1809 __blk_run_queue(q); in blk_queue_bio()
1811 spin_unlock_irq(q->queue_lock); in blk_queue_bio()
1915 struct request_queue *q; in generic_make_request_checks() local
1926 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks()
1927 if (unlikely(!q)) { in generic_make_request_checks()
1956 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { in generic_make_request_checks()
1965 (!blk_queue_discard(q) || in generic_make_request_checks()
1966 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { in generic_make_request_checks()
1982 create_io_context(GFP_ATOMIC, q->node); in generic_make_request_checks()
1984 if (!blkcg_bio_issue_check(q, bio)) in generic_make_request_checks()
1987 trace_block_bio_queue(q, bio); in generic_make_request_checks()
2061 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request() local
2063 if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) { in generic_make_request()
2065 ret = q->make_request_fn(q, bio); in generic_make_request()
2067 blk_queue_exit(q); in generic_make_request()
2149 static int blk_cloned_rq_check_limits(struct request_queue *q, in blk_cloned_rq_check_limits() argument
2152 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { in blk_cloned_rq_check_limits()
2164 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_cloned_rq_check_limits()
2177 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2182 if (blk_cloned_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2189 if (q->mq_ops) { in blk_insert_cloned_request()
2190 if (blk_queue_io_stat(q)) in blk_insert_cloned_request()
2196 spin_lock_irqsave(q->queue_lock, flags); in blk_insert_cloned_request()
2197 if (unlikely(blk_queue_dying(q))) { in blk_insert_cloned_request()
2198 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2211 add_acct_request(q, rq, where); in blk_insert_cloned_request()
2213 __blk_run_queue(q); in blk_insert_cloned_request()
2214 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2309 static struct request *blk_pm_peek_request(struct request_queue *q, in blk_pm_peek_request() argument
2312 if (q->dev && (q->rpm_status == RPM_SUSPENDED || in blk_pm_peek_request()
2313 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) in blk_pm_peek_request()
2319 static inline struct request *blk_pm_peek_request(struct request_queue *q, in blk_pm_peek_request() argument
2378 struct request *blk_peek_request(struct request_queue *q) in blk_peek_request() argument
2383 while ((rq = __elv_next_request(q)) != NULL) { in blk_peek_request()
2385 rq = blk_pm_peek_request(q, rq); in blk_peek_request()
2396 elv_activate_rq(q, rq); in blk_peek_request()
2404 trace_block_rq_issue(q, rq); in blk_peek_request()
2407 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2408 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2409 q->boundary_rq = NULL; in blk_peek_request()
2415 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2425 if (!q->prep_rq_fn) in blk_peek_request()
2428 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2438 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2469 struct request_queue *q = rq->q; in blk_dequeue_request() local
2482 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2533 struct request *blk_fetch_request(struct request_queue *q) in blk_fetch_request() argument
2537 rq = blk_peek_request(q); in blk_fetch_request()
2570 trace_block_rq_complete(req->q, req, nr_bytes); in blk_update_request()
2693 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
2711 struct request_queue *q = req->q; in blk_unprep_request() local
2714 if (q->unprep_rq_fn) in blk_unprep_request()
2715 q->unprep_rq_fn(q, req); in blk_unprep_request()
2725 blk_queue_end_tag(req->q, req); in blk_finish_request()
2730 laptop_io_completion(&req->q->backing_dev_info); in blk_finish_request()
2743 __blk_put_request(req->next_rq->q, req->next_rq); in blk_finish_request()
2745 __blk_put_request(req->q, req); in blk_finish_request()
2770 struct request_queue *q = rq->q; in blk_end_bidi_request() local
2776 spin_lock_irqsave(q->queue_lock, flags); in blk_end_bidi_request()
2778 spin_unlock_irqrestore(q->queue_lock, flags); in blk_end_bidi_request()
2966 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
2973 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
3020 int blk_lld_busy(struct request_queue *q) in blk_lld_busy() argument
3022 if (q->lld_busy_fn) in blk_lld_busy()
3023 return q->lld_busy_fn(q); in blk_lld_busy()
3181 return !(rqa->q < rqb->q || in plug_rq_cmp()
3182 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); in plug_rq_cmp()
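
plug_rq_cmp() orders the plug list first by queue pointer, then by starting sector, so that blk_flush_plug_list() can hand each queue one contiguous, sector-ordered batch. An equivalent userspace comparator over a plain array (using qsort rather than the kernel's list_sort, with an integer queue id standing in for the pointer):

#include <stdio.h>
#include <stdlib.h>

/* Invented flat representation of a plugged request. */
struct toy_req {
	int queue_id;			/* stands in for rq->q */
	unsigned long long pos;		/* stands in for blk_rq_pos(rq) */
};

static int plug_rq_cmp(const void *pa, const void *pb)
{
	const struct toy_req *a = pa, *b = pb;

	if (a->queue_id != b->queue_id)		/* group by queue first */
		return a->queue_id < b->queue_id ? -1 : 1;
	if (a->pos != b->pos)			/* then ascending sector */
		return a->pos < b->pos ? -1 : 1;
	return 0;
}

int main(void)
{
	struct toy_req reqs[] = {
		{ 2, 500 }, { 1, 900 }, { 2, 100 }, { 1, 100 },
	};
	size_t i, n = sizeof(reqs) / sizeof(reqs[0]);

	qsort(reqs, n, sizeof(reqs[0]), plug_rq_cmp);
	for (i = 0; i < n; i++)		/* (1,100) (1,900) (2,100) (2,500) */
		printf("q%d @%llu\n", reqs[i].queue_id, reqs[i].pos);
	return 0;
}
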
3191 static void queue_unplugged(struct request_queue *q, unsigned int depth, in queue_unplugged() argument
3193 __releases(q->queue_lock) in queue_unplugged()
3195 trace_block_unplug(q, depth, !from_schedule); in queue_unplugged()
3198 blk_run_queue_async(q); in queue_unplugged()
3200 __blk_run_queue(q); in queue_unplugged()
3201 spin_unlock(q->queue_lock); in queue_unplugged()
3248 struct request_queue *q; in blk_flush_plug_list() local
3266 q = NULL; in blk_flush_plug_list()
3277 BUG_ON(!rq->q); in blk_flush_plug_list()
3278 if (rq->q != q) { in blk_flush_plug_list()
3282 if (q) in blk_flush_plug_list()
3283 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
3284 q = rq->q; in blk_flush_plug_list()
3286 spin_lock(q->queue_lock); in blk_flush_plug_list()
3292 if (unlikely(blk_queue_dying(q))) { in blk_flush_plug_list()
3301 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3303 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
3311 if (q) in blk_flush_plug_list()
3312 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
3327 bool blk_poll(struct request_queue *q, blk_qc_t cookie) in blk_poll() argument
3332 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) || in blk_poll()
3333 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in blk_poll()
3343 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num]; in blk_poll()
3348 ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie)); in blk_poll()
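
The cookie passed to blk_poll() packs which hardware queue the request was issued on together with its tag; the function splits it, looks up the matching hctx, and calls ->poll for that tag. A toy decoder showing the idea; the 16-bit split below is an assumption for illustration only, the kernel defines the real layout (BLK_QC_T_SHIFT and friends):

#include <stdio.h>

#define TOY_QC_SHIFT	16	/* assumed split: hwq index | tag */

static unsigned int cookie_to_queue_num(unsigned int cookie)
{
	return cookie >> TOY_QC_SHIFT;
}

static unsigned int cookie_to_tag(unsigned int cookie)
{
	return cookie & ((1u << TOY_QC_SHIFT) - 1);
}

int main(void)
{
	unsigned int cookie = (3u << TOY_QC_SHIFT) | 42u;

	/* blk_poll() would look up queue_hw_ctx[3] and poll for tag 42 */
	printf("hwq=%u tag=%u\n", cookie_to_queue_num(cookie),
	       cookie_to_tag(cookie));
	return 0;
}
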
3390 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
3392 q->dev = dev; in blk_pm_runtime_init()
3393 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
3394 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
3395 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
3420 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
3424 if (!q->dev) in blk_pre_runtime_suspend()
3427 spin_lock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3428 if (q->nr_pending) { in blk_pre_runtime_suspend()
3430 pm_runtime_mark_last_busy(q->dev); in blk_pre_runtime_suspend()
3432 q->rpm_status = RPM_SUSPENDING; in blk_pre_runtime_suspend()
3434 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3452 void blk_post_runtime_suspend(struct request_queue *q, int err) in blk_post_runtime_suspend() argument
3454 if (!q->dev) in blk_post_runtime_suspend()
3457 spin_lock_irq(q->queue_lock); in blk_post_runtime_suspend()
3459 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_suspend()
3461 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_suspend()
3462 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_suspend()
3464 spin_unlock_irq(q->queue_lock); in blk_post_runtime_suspend()
3479 void blk_pre_runtime_resume(struct request_queue *q) in blk_pre_runtime_resume() argument
3481 if (!q->dev) in blk_pre_runtime_resume()
3484 spin_lock_irq(q->queue_lock); in blk_pre_runtime_resume()
3485 q->rpm_status = RPM_RESUMING; in blk_pre_runtime_resume()
3486 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_resume()
3504 void blk_post_runtime_resume(struct request_queue *q, int err) in blk_post_runtime_resume() argument
3506 if (!q->dev) in blk_post_runtime_resume()
3509 spin_lock_irq(q->queue_lock); in blk_post_runtime_resume()
3511 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_resume()
3512 __blk_run_queue(q); in blk_post_runtime_resume()
3513 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_resume()
3514 pm_request_autosuspend(q->dev); in blk_post_runtime_resume()
3516 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_resume()
3518 spin_unlock_irq(q->queue_lock); in blk_post_runtime_resume()
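
The four runtime-PM helpers move q->rpm_status through a small state machine: ACTIVE to SUSPENDING to SUSPENDED on a successful suspend (back to ACTIVE on failure), and SUSPENDED to RESUMING to ACTIVE on resume. A compact sketch of just those transitions (enum and function names invented; locking and the nr_pending busy check are omitted):

#include <stdio.h>

enum rpm_state { TOY_ACTIVE, TOY_SUSPENDING, TOY_SUSPENDED, TOY_RESUMING };

/* Mirrors only the rpm_status updates done by blk_pre/post_runtime_suspend()
 * and blk_pre/post_runtime_resume(). */
static enum rpm_state pre_suspend(void)
{
	return TOY_SUSPENDING;
}

static enum rpm_state post_suspend(int err)
{
	return err ? TOY_ACTIVE : TOY_SUSPENDED;	/* failed suspend stays active */
}

static enum rpm_state pre_resume(void)
{
	return TOY_RESUMING;
}

static enum rpm_state post_resume(int err)
{
	return err ? TOY_SUSPENDED : TOY_ACTIVE;	/* failed resume stays suspended */
}

static const char *name(enum rpm_state s)
{
	static const char *n[] = { "ACTIVE", "SUSPENDING", "SUSPENDED", "RESUMING" };
	return n[s];
}

int main(void)
{
	enum rpm_state s = TOY_ACTIVE;

	s = pre_suspend();	printf("-> %s\n", name(s));
	s = post_suspend(0);	printf("-> %s\n", name(s));	/* driver suspended OK */
	s = pre_resume();	printf("-> %s\n", name(s));
	s = post_resume(0);	printf("-> %s\n", name(s));	/* back to ACTIVE */
	return 0;
}
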