Lines matching refs: q (struct request_queue *q)

66 void blk_queue_congestion_threshold(struct request_queue *q)  in blk_queue_congestion_threshold()  argument
70 nr = q->nr_requests - (q->nr_requests / 8) + 1; in blk_queue_congestion_threshold()
71 if (nr > q->nr_requests) in blk_queue_congestion_threshold()
72 nr = q->nr_requests; in blk_queue_congestion_threshold()
73 q->nr_congestion_on = nr; in blk_queue_congestion_threshold()
75 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; in blk_queue_congestion_threshold()
78 q->nr_congestion_off = nr; in blk_queue_congestion_threshold()
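
    The matches above are the congestion-watermark arithmetic. A minimal reconstruction of what they compute; the lower clamp on the off threshold is an assumption, since that line is not among the matches:

        /* Sketch: derive the congestion watermarks from q->nr_requests.
         * "on" triggers near 7/8 of the pool, "off" clears around 13/16,
         * so the pair has some hysteresis.
         */
        int nr;

        nr = q->nr_requests - (q->nr_requests / 8) + 1;
        if (nr > q->nr_requests)
                nr = q->nr_requests;
        q->nr_congestion_on = nr;

        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
        if (nr < 1)                     /* assumed clamp, not shown above */
                nr = 1;
        q->nr_congestion_off = nr;

    With the default pool of 128 requests this works out to nr_congestion_on = 113 and nr_congestion_off = 103.
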
91 struct request_queue *q = bdev_get_queue(bdev); in blk_get_backing_dev_info() local
93 return &q->backing_dev_info; in blk_get_backing_dev_info()
97 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
104 rq->q = q; in blk_rq_init()
160 struct request_queue *q; in blk_delay_work() local
162 q = container_of(work, struct request_queue, delay_work.work); in blk_delay_work()
163 spin_lock_irq(q->queue_lock); in blk_delay_work()
164 __blk_run_queue(q); in blk_delay_work()
165 spin_unlock_irq(q->queue_lock); in blk_delay_work()
178 void blk_delay_queue(struct request_queue *q, unsigned long msecs) in blk_delay_queue() argument
180 if (likely(!blk_queue_dead(q))) in blk_delay_queue()
181 queue_delayed_work(kblockd_workqueue, &q->delay_work, in blk_delay_queue()
195 void blk_start_queue(struct request_queue *q) in blk_start_queue() argument
199 queue_flag_clear(QUEUE_FLAG_STOPPED, q); in blk_start_queue()
200 __blk_run_queue(q); in blk_start_queue()
218 void blk_stop_queue(struct request_queue *q) in blk_stop_queue() argument
220 cancel_delayed_work(&q->delay_work); in blk_stop_queue()
221 queue_flag_set(QUEUE_FLAG_STOPPED, q); in blk_stop_queue()
243 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
245 del_timer_sync(&q->timeout); in blk_sync_queue()
247 if (q->mq_ops) { in blk_sync_queue()
251 queue_for_each_hw_ctx(q, hctx, i) { in blk_sync_queue()
256 cancel_delayed_work_sync(&q->delay_work); in blk_sync_queue()
272 inline void __blk_run_queue_uncond(struct request_queue *q) in __blk_run_queue_uncond() argument
274 if (unlikely(blk_queue_dead(q))) in __blk_run_queue_uncond()
284 q->request_fn_active++; in __blk_run_queue_uncond()
285 q->request_fn(q); in __blk_run_queue_uncond()
286 q->request_fn_active--; in __blk_run_queue_uncond()
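
    The request_fn_active counter bracketing the ->request_fn() call exists so that the drain path (see the drain |= q->request_fn_active match further down) can wait out driver request functions that drop the queue lock internally. A sketch of the body, reconstructed from the matches:

        if (unlikely(blk_queue_dead(q)))
                return;

        /* Some ->request_fn() implementations (SCSI, for one) release and
         * reacquire queue_lock, so several invocations may be in flight at
         * once; count them so __blk_drain_queue() can wait for all of them.
         */
        q->request_fn_active++;
        q->request_fn(q);
        q->request_fn_active--;
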
297 void __blk_run_queue(struct request_queue *q) in __blk_run_queue() argument
299 if (unlikely(blk_queue_stopped(q))) in __blk_run_queue()
302 __blk_run_queue_uncond(q); in __blk_run_queue()
314 void blk_run_queue_async(struct request_queue *q) in blk_run_queue_async() argument
316 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) in blk_run_queue_async()
317 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); in blk_run_queue_async()
329 void blk_run_queue(struct request_queue *q) in blk_run_queue() argument
333 spin_lock_irqsave(q->queue_lock, flags); in blk_run_queue()
334 __blk_run_queue(q); in blk_run_queue()
335 spin_unlock_irqrestore(q->queue_lock, flags); in blk_run_queue()
339 void blk_put_queue(struct request_queue *q) in blk_put_queue() argument
341 kobject_put(&q->kobj); in blk_put_queue()
354 static void __blk_drain_queue(struct request_queue *q, bool drain_all) in __blk_drain_queue() argument
355 __releases(q->queue_lock) in __blk_drain_queue()
356 __acquires(q->queue_lock) in __blk_drain_queue()
360 lockdep_assert_held(q->queue_lock); in __blk_drain_queue()
369 if (q->elevator) in __blk_drain_queue()
370 elv_drain_elevator(q); in __blk_drain_queue()
372 blkcg_drain_queue(q); in __blk_drain_queue()
381 if (!list_empty(&q->queue_head) && q->request_fn) in __blk_drain_queue()
382 __blk_run_queue(q); in __blk_drain_queue()
384 drain |= q->nr_rqs_elvpriv; in __blk_drain_queue()
385 drain |= q->request_fn_active; in __blk_drain_queue()
393 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in __blk_drain_queue()
394 drain |= !list_empty(&q->queue_head); in __blk_drain_queue()
396 drain |= q->nr_rqs[i]; in __blk_drain_queue()
397 drain |= q->in_flight[i]; in __blk_drain_queue()
406 spin_unlock_irq(q->queue_lock); in __blk_drain_queue()
410 spin_lock_irq(q->queue_lock); in __blk_drain_queue()
418 if (q->request_fn) { in __blk_drain_queue()
421 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
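
    Between the elevator/blkcg kicks and the final wake-up of request-list waiters, the matches outline a poll-until-idle loop. A condensed reconstruction; the sleep interval and the drain_all gating of the extra counters are assumptions, not taken from the matches:

        while (true) {
                bool drain = false;
                int i;

                if (q->elevator)
                        elv_drain_elevator(q);
                blkcg_drain_queue(q);

                /* Kick dispatch only if there is something to dispatch. */
                if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);

                drain |= q->nr_rqs_elvpriv;
                drain |= q->request_fn_active;

                if (drain_all) {        /* assumed: extra checks only here */
                        drain |= !list_empty(&q->queue_head);
                        for (i = 0; i < 2; i++) {
                                drain |= q->nr_rqs[i];
                                drain |= q->in_flight[i];
                        }
                }

                if (!drain)
                        break;

                spin_unlock_irq(q->queue_lock);
                msleep(10);             /* assumed back-off interval */
                spin_lock_irq(q->queue_lock);
        }
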
437 void blk_queue_bypass_start(struct request_queue *q) in blk_queue_bypass_start() argument
439 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
440 q->bypass_depth++; in blk_queue_bypass_start()
441 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_start()
442 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
449 if (blk_queue_init_done(q)) { in blk_queue_bypass_start()
450 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
451 __blk_drain_queue(q, false); in blk_queue_bypass_start()
452 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
466 void blk_queue_bypass_end(struct request_queue *q) in blk_queue_bypass_end() argument
468 spin_lock_irq(q->queue_lock); in blk_queue_bypass_end()
469 if (!--q->bypass_depth) in blk_queue_bypass_end()
470 queue_flag_clear(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_end()
471 WARN_ON_ONCE(q->bypass_depth < 0); in blk_queue_bypass_end()
472 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_end()
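
    bypass_depth nests, so start and end must pair. A hypothetical caller, e.g. code that switches the elevator or activates a per-queue policy while the queue is forced into FIFO bypass:

        blk_queue_bypass_start(q);      /* drains elv-private requests      */
        /* ... swap the elevator / update per-queue policy data here ...    */
        blk_queue_bypass_end(q);        /* leaves bypass when depth hits 0  */
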
476 void blk_set_queue_dying(struct request_queue *q) in blk_set_queue_dying() argument
478 queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); in blk_set_queue_dying()
480 if (q->mq_ops) in blk_set_queue_dying()
481 blk_mq_wake_waiters(q); in blk_set_queue_dying()
485 blk_queue_for_each_rl(rl, q) { in blk_set_queue_dying()
502 void blk_cleanup_queue(struct request_queue *q) in blk_cleanup_queue() argument
504 spinlock_t *lock = q->queue_lock; in blk_cleanup_queue()
507 mutex_lock(&q->sysfs_lock); in blk_cleanup_queue()
508 blk_set_queue_dying(q); in blk_cleanup_queue()
520 q->bypass_depth++; in blk_cleanup_queue()
521 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_cleanup_queue()
523 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in blk_cleanup_queue()
524 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in blk_cleanup_queue()
525 queue_flag_set(QUEUE_FLAG_DYING, q); in blk_cleanup_queue()
527 mutex_unlock(&q->sysfs_lock); in blk_cleanup_queue()
533 if (q->mq_ops) { in blk_cleanup_queue()
534 blk_mq_freeze_queue(q); in blk_cleanup_queue()
538 __blk_drain_queue(q, true); in blk_cleanup_queue()
540 queue_flag_set(QUEUE_FLAG_DEAD, q); in blk_cleanup_queue()
544 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); in blk_cleanup_queue()
545 blk_sync_queue(q); in blk_cleanup_queue()
547 if (q->mq_ops) in blk_cleanup_queue()
548 blk_mq_free_queue(q); in blk_cleanup_queue()
551 if (q->queue_lock != &q->__queue_lock) in blk_cleanup_queue()
552 q->queue_lock = &q->__queue_lock; in blk_cleanup_queue()
555 bdi_destroy(&q->backing_dev_info); in blk_cleanup_queue()
558 blk_put_queue(q); in blk_cleanup_queue()
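
    A hypothetical driver-removal path showing where blk_cleanup_queue() sits; my_disk and my_queue are stand-ins, not names from the matches:

        del_gendisk(my_disk);           /* stop new I/O arriving via the disk  */
        blk_cleanup_queue(my_queue);    /* mark DYING, drain, tear down mq/bdi */
        put_disk(my_disk);              /* drop the driver's gendisk reference */
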
574 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
580 rl->q = q; in blk_init_rl()
588 (void *)(long)q->node, gfp_mask, in blk_init_rl()
589 q->node); in blk_init_rl()
610 struct request_queue *q; in blk_alloc_queue_node() local
613 q = kmem_cache_alloc_node(blk_requestq_cachep, in blk_alloc_queue_node()
615 if (!q) in blk_alloc_queue_node()
618 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); in blk_alloc_queue_node()
619 if (q->id < 0) in blk_alloc_queue_node()
622 q->backing_dev_info.ra_pages = in blk_alloc_queue_node()
624 q->backing_dev_info.state = 0; in blk_alloc_queue_node()
625 q->backing_dev_info.capabilities = 0; in blk_alloc_queue_node()
626 q->backing_dev_info.name = "block"; in blk_alloc_queue_node()
627 q->node = node_id; in blk_alloc_queue_node()
629 err = bdi_init(&q->backing_dev_info); in blk_alloc_queue_node()
633 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, in blk_alloc_queue_node()
634 laptop_mode_timer_fn, (unsigned long) q); in blk_alloc_queue_node()
635 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); in blk_alloc_queue_node()
636 INIT_LIST_HEAD(&q->queue_head); in blk_alloc_queue_node()
637 INIT_LIST_HEAD(&q->timeout_list); in blk_alloc_queue_node()
638 INIT_LIST_HEAD(&q->icq_list); in blk_alloc_queue_node()
640 INIT_LIST_HEAD(&q->blkg_list); in blk_alloc_queue_node()
642 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); in blk_alloc_queue_node()
644 kobject_init(&q->kobj, &blk_queue_ktype); in blk_alloc_queue_node()
646 mutex_init(&q->sysfs_lock); in blk_alloc_queue_node()
647 spin_lock_init(&q->__queue_lock); in blk_alloc_queue_node()
653 q->queue_lock = &q->__queue_lock; in blk_alloc_queue_node()
661 q->bypass_depth = 1; in blk_alloc_queue_node()
662 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); in blk_alloc_queue_node()
664 init_waitqueue_head(&q->mq_freeze_wq); in blk_alloc_queue_node()
666 if (blkcg_init_queue(q)) in blk_alloc_queue_node()
669 return q; in blk_alloc_queue_node()
672 bdi_destroy(&q->backing_dev_info); in blk_alloc_queue_node()
674 ida_simple_remove(&blk_queue_ida, q->id); in blk_alloc_queue_node()
676 kmem_cache_free(blk_requestq_cachep, q); in blk_alloc_queue_node()
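
    For a bio-based driver the allocated queue is used directly, paired with a make_request handler. A minimal sketch; my_make_request is hypothetical:

        struct request_queue *q;

        q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
        if (!q)
                return -ENOMEM;
        blk_queue_make_request(q, my_make_request);     /* hypothetical handler */
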
723 struct request_queue *uninit_q, *q; in blk_init_queue_node() local
729 q = blk_init_allocated_queue(uninit_q, rfn, lock); in blk_init_queue_node()
730 if (!q) in blk_init_queue_node()
733 return q; in blk_init_queue_node()
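
    The matches show blk_init_queue_node() as a thin wrapper: allocate, then hand off to blk_init_allocated_queue(), cleaning up on failure. A reconstruction under that reading (the failure path is assumed):

        struct request_queue *
        blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        {
                struct request_queue *uninit_q, *q;

                uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
                if (!uninit_q)
                        return NULL;

                q = blk_init_allocated_queue(uninit_q, rfn, lock);
                if (!q)
                        blk_cleanup_queue(uninit_q);    /* assumed cleanup */

                return q;
        }
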
737 static void blk_queue_bio(struct request_queue *q, struct bio *bio);
740 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, in blk_init_allocated_queue() argument
743 if (!q) in blk_init_allocated_queue()
746 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0); in blk_init_allocated_queue()
747 if (!q->fq) in blk_init_allocated_queue()
750 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) in blk_init_allocated_queue()
753 q->request_fn = rfn; in blk_init_allocated_queue()
754 q->prep_rq_fn = NULL; in blk_init_allocated_queue()
755 q->unprep_rq_fn = NULL; in blk_init_allocated_queue()
756 q->queue_flags |= QUEUE_FLAG_DEFAULT; in blk_init_allocated_queue()
760 q->queue_lock = lock; in blk_init_allocated_queue()
765 blk_queue_make_request(q, blk_queue_bio); in blk_init_allocated_queue()
767 q->sg_reserved_size = INT_MAX; in blk_init_allocated_queue()
770 mutex_lock(&q->sysfs_lock); in blk_init_allocated_queue()
773 if (elevator_init(q, NULL)) { in blk_init_allocated_queue()
774 mutex_unlock(&q->sysfs_lock); in blk_init_allocated_queue()
778 mutex_unlock(&q->sysfs_lock); in blk_init_allocated_queue()
780 return q; in blk_init_allocated_queue()
783 blk_free_flush_queue(q->fq); in blk_init_allocated_queue()
788 bool blk_get_queue(struct request_queue *q) in blk_get_queue() argument
790 if (likely(!blk_queue_dying(q))) { in blk_get_queue()
791 __blk_get_queue(q); in blk_get_queue()
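
    blk_get_queue()/blk_put_queue() bracket any use of a queue pointer that might outlive its owner; the get fails once the queue is dying. A pairing sketch:

        if (!blk_get_queue(q))
                return -ENXIO;          /* queue already marked dying   */
        /* ... safe to dereference q here ... */
        blk_put_queue(q);               /* drops the kobject reference  */
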
802 elv_put_request(rl->q, rq); in blk_free_request()
814 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) in ioc_batching() argument
824 return ioc->nr_batch_requests == q->nr_batching || in ioc_batching()
835 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) in ioc_set_batching() argument
837 if (!ioc || ioc_batching(q, ioc)) in ioc_set_batching()
840 ioc->nr_batch_requests = q->nr_batching; in ioc_set_batching()
846 struct request_queue *q = rl->q; in __freed_request() local
852 if (rl == &q->root_rl && in __freed_request()
853 rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
854 blk_clear_queue_congested(q, sync); in __freed_request()
856 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
870 struct request_queue *q = rl->q; in freed_request() local
873 q->nr_rqs[sync]--; in freed_request()
876 q->nr_rqs_elvpriv--; in freed_request()
884 int blk_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_update_nr_requests() argument
888 spin_lock_irq(q->queue_lock); in blk_update_nr_requests()
889 q->nr_requests = nr; in blk_update_nr_requests()
890 blk_queue_congestion_threshold(q); in blk_update_nr_requests()
893 rl = &q->root_rl; in blk_update_nr_requests()
895 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) in blk_update_nr_requests()
896 blk_set_queue_congested(q, BLK_RW_SYNC); in blk_update_nr_requests()
897 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) in blk_update_nr_requests()
898 blk_clear_queue_congested(q, BLK_RW_SYNC); in blk_update_nr_requests()
900 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) in blk_update_nr_requests()
901 blk_set_queue_congested(q, BLK_RW_ASYNC); in blk_update_nr_requests()
902 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) in blk_update_nr_requests()
903 blk_clear_queue_congested(q, BLK_RW_ASYNC); in blk_update_nr_requests()
905 blk_queue_for_each_rl(rl, q) { in blk_update_nr_requests()
906 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
913 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
921 spin_unlock_irq(q->queue_lock); in blk_update_nr_requests()
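
    blk_update_nr_requests() is reached when the request-pool size changes at run time; it re-derives the congestion thresholds and re-evaluates every request list. A hedged sketch of the usual caller, assumed to be the sysfs nr_requests store handler:

        /* nr parsed from a write to /sys/block/<dev>/queue/nr_requests (assumed) */
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;     /* assumed lower bound on the pool */
        err = blk_update_nr_requests(q, nr);
        if (err)
                return err;
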
977 struct request_queue *q = rl->q; in __get_request() local
979 struct elevator_type *et = q->elevator->type; in __get_request()
985 if (unlikely(blk_queue_dying(q))) in __get_request()
988 may_queue = elv_may_queue(q, rw_flags); in __get_request()
992 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
993 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
1001 ioc_set_batching(q, ioc); in __get_request()
1005 && !ioc_batching(q, ioc)) { in __get_request()
1019 if (rl == &q->root_rl) in __get_request()
1020 blk_set_queue_congested(q, is_sync); in __get_request()
1028 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
1031 q->nr_rqs[is_sync]++; in __get_request()
1045 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { in __get_request()
1047 q->nr_rqs_elvpriv++; in __get_request()
1049 icq = ioc_lookup_icq(ioc, q); in __get_request()
1052 if (blk_queue_io_stat(q)) in __get_request()
1054 spin_unlock_irq(q->queue_lock); in __get_request()
1061 blk_rq_init(q, rq); in __get_request()
1069 icq = ioc_create_icq(ioc, q, gfp_mask); in __get_request()
1075 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1089 if (ioc_batching(q, ioc)) in __get_request()
1092 trace_block_getrq(q, bio, rw_flags & 1); in __get_request()
1103 __func__, dev_name(q->backing_dev_info.dev)); in __get_request()
1108 spin_lock_irq(q->queue_lock); in __get_request()
1109 q->nr_rqs_elvpriv--; in __get_request()
1110 spin_unlock_irq(q->queue_lock); in __get_request()
1121 spin_lock_irq(q->queue_lock); in __get_request()
1151 static struct request *get_request(struct request_queue *q, int rw_flags, in get_request() argument
1159 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1165 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { in get_request()
1174 trace_block_sleeprq(q, bio, rw_flags & 1); in get_request()
1176 spin_unlock_irq(q->queue_lock); in get_request()
1184 ioc_set_batching(q, current->io_context); in get_request()
1186 spin_lock_irq(q->queue_lock); in get_request()
1192 static struct request *blk_old_get_request(struct request_queue *q, int rw, in blk_old_get_request() argument
1200 create_io_context(gfp_mask, q->node); in blk_old_get_request()
1202 spin_lock_irq(q->queue_lock); in blk_old_get_request()
1203 rq = get_request(q, rw, NULL, gfp_mask); in blk_old_get_request()
1205 spin_unlock_irq(q->queue_lock); in blk_old_get_request()
1211 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) in blk_get_request() argument
1213 if (q->mq_ops) in blk_get_request()
1214 return blk_mq_alloc_request(q, rw, gfp_mask, false); in blk_get_request()
1216 return blk_old_get_request(q, rw, gfp_mask); in blk_get_request()
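
    A hypothetical pass-through user of the allocation API: get a request, fill it, execute it synchronously, put it back. The ERR_PTR return convention and the blk_rq_map_kern() mention are assumptions about typical use, not taken from the matches:

        struct request *rq;

        rq = blk_get_request(q, WRITE, GFP_KERNEL);
        if (IS_ERR(rq))                 /* assumed ERR_PTR convention of this era */
                return PTR_ERR(rq);

        /* ... fill the request, e.g. via blk_rq_map_kern() ... */

        blk_execute_rq(q, NULL, rq, 0); /* insert at tail, wait for completion */
        blk_put_request(rq);
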
1251 struct request *blk_make_request(struct request_queue *q, struct bio *bio, in blk_make_request() argument
1254 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request()
1265 blk_queue_bounce(q, &bounce_bio); in blk_make_request()
1266 ret = blk_rq_append_bio(q, rq, bounce_bio); in blk_make_request()
1302 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1306 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1309 blk_queue_end_tag(q, rq); in blk_requeue_request()
1313 elv_requeue_request(q, rq); in blk_requeue_request()
1317 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1321 __elv_add_request(q, rq, where); in add_acct_request()
1370 if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1371 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1380 void __blk_put_request(struct request_queue *q, struct request *req) in __blk_put_request() argument
1382 if (unlikely(!q)) in __blk_put_request()
1385 if (q->mq_ops) { in __blk_put_request()
1392 elv_completed_request(q, req); in __blk_put_request()
1417 struct request_queue *q = req->q; in blk_put_request() local
1419 if (q->mq_ops) in blk_put_request()
1424 spin_lock_irqsave(q->queue_lock, flags); in blk_put_request()
1425 __blk_put_request(q, req); in blk_put_request()
1426 spin_unlock_irqrestore(q->queue_lock, flags); in blk_put_request()
1462 bool bio_attempt_back_merge(struct request_queue *q, struct request *req, in bio_attempt_back_merge() argument
1467 if (!ll_back_merge_fn(q, req, bio)) in bio_attempt_back_merge()
1470 trace_block_bio_backmerge(q, req, bio); in bio_attempt_back_merge()
1484 bool bio_attempt_front_merge(struct request_queue *q, struct request *req, in bio_attempt_front_merge() argument
1489 if (!ll_front_merge_fn(q, req, bio)) in bio_attempt_front_merge()
1492 trace_block_bio_frontmerge(q, req, bio); in bio_attempt_front_merge()
1527 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1540 if (q->mq_ops) in blk_attempt_plug_merge()
1548 if (rq->q == q) in blk_attempt_plug_merge()
1551 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1556 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1560 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
1580 blk_rq_bio_prep(req->q, req, bio); in init_request_from_bio()
1583 static void blk_queue_bio(struct request_queue *q, struct bio *bio) in blk_queue_bio() argument
1596 blk_queue_bounce(q, &bio); in blk_queue_bio()
1604 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1613 if (!blk_queue_nomerges(q) && in blk_queue_bio()
1614 blk_attempt_plug_merge(q, bio, &request_count)) in blk_queue_bio()
1617 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1619 el_ret = elv_merge(q, &req, bio); in blk_queue_bio()
1621 if (bio_attempt_back_merge(q, req, bio)) { in blk_queue_bio()
1622 elv_bio_merged(q, req, bio); in blk_queue_bio()
1623 if (!attempt_back_merge(q, req)) in blk_queue_bio()
1624 elv_merged_request(q, req, el_ret); in blk_queue_bio()
1628 if (bio_attempt_front_merge(q, req, bio)) { in blk_queue_bio()
1629 elv_bio_merged(q, req, bio); in blk_queue_bio()
1630 if (!attempt_front_merge(q, req)) in blk_queue_bio()
1631 elv_merged_request(q, req, el_ret); in blk_queue_bio()
1650 req = get_request(q, rw_flags, bio, GFP_NOIO); in blk_queue_bio()
1664 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) in blk_queue_bio()
1674 trace_block_plug(q); in blk_queue_bio()
1678 trace_block_plug(q); in blk_queue_bio()
1684 spin_lock_irq(q->queue_lock); in blk_queue_bio()
1685 add_acct_request(q, req, where); in blk_queue_bio()
1686 __blk_run_queue(q); in blk_queue_bio()
1688 spin_unlock_irq(q->queue_lock); in blk_queue_bio()
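
    Read together, the blk_queue_bio() matches trace the single-queue submit path: bounce, try a plugged-request merge without the lock, then an elevator back/front merge under queue_lock, and only if all of that fails allocate a request and either park it on the current plug or dispatch it. A condensed, partly assumed outline (error handling and the flush/FUA special case omitted):

        blk_queue_bounce(q, &bio);

        /* Lockless fast path: merge into a request already on the task plug. */
        if (!blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count))
                return;

        /* Otherwise look for an elevator merge under the queue lock. */
        spin_lock_irq(q->queue_lock);
        el_ret = elv_merge(q, &req, bio);
        /* ELEVATOR_BACK_MERGE / ELEVATOR_FRONT_MERGE handled here ... */

        /* No merge: allocate a request (this drops queue_lock internally). */
        req = get_request(q, rw_flags, bio, GFP_NOIO);
        init_request_from_bio(req, bio);

        if (current->plug) {
                /* Collected on the plug; replayed by blk_flush_plug_list(). */
                list_add_tail(&req->queuelist, &current->plug->list);
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
                __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
        }
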
1792 struct request_queue *q; in generic_make_request_checks() local
1803 q = bdev_get_queue(bio->bi_bdev); in generic_make_request_checks()
1804 if (unlikely(!q)) { in generic_make_request_checks()
1814 nr_sectors > queue_max_hw_sectors(q))) { in generic_make_request_checks()
1818 queue_max_hw_sectors(q)); in generic_make_request_checks()
1842 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { in generic_make_request_checks()
1851 (!blk_queue_discard(q) || in generic_make_request_checks()
1852 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { in generic_make_request_checks()
1868 create_io_context(GFP_ATOMIC, q->node); in generic_make_request_checks()
1870 if (blk_throtl_bio(q, bio)) in generic_make_request_checks()
1873 trace_block_bio_queue(q, bio); in generic_make_request_checks()
1945 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in generic_make_request() local
1947 q->make_request_fn(q, bio); in generic_make_request()
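
    generic_make_request() just resolves the queue from bio->bi_bdev and calls its make_request_fn. A hypothetical submitter using the bio fields of this era; my_bdev, my_page and my_end_io are placeholders:

        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = my_bdev;                 /* hypothetical target device */
        bio->bi_iter.bi_sector = 0;
        bio_add_page(bio, my_page, PAGE_SIZE, 0);
        bio->bi_end_io = my_end_io;             /* hypothetical completion    */
        submit_bio(READ, bio);                  /* -> generic_make_request()  */
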
2024 int blk_rq_check_limits(struct request_queue *q, struct request *rq) in blk_rq_check_limits() argument
2029 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { in blk_rq_check_limits()
2041 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_rq_check_limits()
2055 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2060 if (blk_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2067 if (q->mq_ops) { in blk_insert_cloned_request()
2068 if (blk_queue_io_stat(q)) in blk_insert_cloned_request()
2074 spin_lock_irqsave(q->queue_lock, flags); in blk_insert_cloned_request()
2075 if (unlikely(blk_queue_dying(q))) { in blk_insert_cloned_request()
2076 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2089 add_acct_request(q, rq, where); in blk_insert_cloned_request()
2091 __blk_run_queue(q); in blk_insert_cloned_request()
2092 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2187 static struct request *blk_pm_peek_request(struct request_queue *q, in blk_pm_peek_request() argument
2190 if (q->dev && (q->rpm_status == RPM_SUSPENDED || in blk_pm_peek_request()
2191 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) in blk_pm_peek_request()
2197 static inline struct request *blk_pm_peek_request(struct request_queue *q, in blk_pm_peek_request() argument
2256 struct request *blk_peek_request(struct request_queue *q) in blk_peek_request() argument
2261 while ((rq = __elv_next_request(q)) != NULL) { in blk_peek_request()
2263 rq = blk_pm_peek_request(q, rq); in blk_peek_request()
2274 elv_activate_rq(q, rq); in blk_peek_request()
2282 trace_block_rq_issue(q, rq); in blk_peek_request()
2285 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2286 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2287 q->boundary_rq = NULL; in blk_peek_request()
2293 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2303 if (!q->prep_rq_fn) in blk_peek_request()
2306 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2316 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2347 struct request_queue *q = rq->q; in blk_dequeue_request() local
2360 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2411 struct request *blk_fetch_request(struct request_queue *q) in blk_fetch_request() argument
2415 rq = blk_peek_request(q); in blk_fetch_request()
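
    blk_fetch_request() is blk_peek_request() plus blk_dequeue_request(), which is what a simple single-queue driver's request_fn loops on. A hypothetical minimal request_fn that completes everything immediately:

        static void my_request_fn(struct request_queue *q)
        {
                struct request *rq;

                /* Called with q->queue_lock held. */
                while ((rq = blk_fetch_request(q)) != NULL) {
                        /* ... hand rq to the hardware here ... */
                        __blk_end_request_all(rq, 0);   /* sketch: succeed at once */
                }
        }
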
2448 trace_block_rq_complete(req->q, req, nr_bytes); in blk_update_request()
2571 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
2589 struct request_queue *q = req->q; in blk_unprep_request() local
2592 if (q->unprep_rq_fn) in blk_unprep_request()
2593 q->unprep_rq_fn(q, req); in blk_unprep_request()
2603 blk_queue_end_tag(req->q, req); in blk_finish_request()
2608 laptop_io_completion(&req->q->backing_dev_info); in blk_finish_request()
2621 __blk_put_request(req->next_rq->q, req->next_rq); in blk_finish_request()
2623 __blk_put_request(req->q, req); in blk_finish_request()
2648 struct request_queue *q = rq->q; in blk_end_bidi_request() local
2654 spin_lock_irqsave(q->queue_lock, flags); in blk_end_bidi_request()
2656 spin_unlock_irqrestore(q->queue_lock, flags); in blk_end_bidi_request()
2844 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
2851 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
2898 int blk_lld_busy(struct request_queue *q) in blk_lld_busy() argument
2900 if (q->lld_busy_fn) in blk_lld_busy()
2901 return q->lld_busy_fn(q); in blk_lld_busy()
3060 return !(rqa->q < rqb->q || in plug_rq_cmp()
3061 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); in plug_rq_cmp()
3070 static void queue_unplugged(struct request_queue *q, unsigned int depth, in queue_unplugged() argument
3072 __releases(q->queue_lock) in queue_unplugged()
3074 trace_block_unplug(q, depth, !from_schedule); in queue_unplugged()
3077 blk_run_queue_async(q); in queue_unplugged()
3079 __blk_run_queue(q); in queue_unplugged()
3080 spin_unlock(q->queue_lock); in queue_unplugged()
3127 struct request_queue *q; in blk_flush_plug_list() local
3145 q = NULL; in blk_flush_plug_list()
3156 BUG_ON(!rq->q); in blk_flush_plug_list()
3157 if (rq->q != q) { in blk_flush_plug_list()
3161 if (q) in blk_flush_plug_list()
3162 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
3163 q = rq->q; in blk_flush_plug_list()
3165 spin_lock(q->queue_lock); in blk_flush_plug_list()
3171 if (unlikely(blk_queue_dying(q))) { in blk_flush_plug_list()
3180 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3182 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
3190 if (q) in blk_flush_plug_list()
3191 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
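
    The plug list that blk_flush_plug_list() sorts (via plug_rq_cmp) and replays is filled by submitters between blk_start_plug() and blk_finish_plug(). A usage sketch:

        struct blk_plug plug;

        blk_start_plug(&plug);
        /* ... submit a batch of bios/requests; they collect on the plug ... */
        blk_finish_plug(&plug);         /* -> blk_flush_plug_list(&plug, false) */
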
3227 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
3229 q->dev = dev; in blk_pm_runtime_init()
3230 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
3231 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
3232 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
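
    Runtime PM is opt-in per queue; a driver wires it up once after its device is registered. A sketch of that setup; the 5-second autosuspend delay and the pm_runtime_allow() call are assumptions about a typical driver, not taken from the matches:

        blk_pm_runtime_init(q, dev);                    /* q->dev set, RPM_ACTIVE  */
        pm_runtime_set_autosuspend_delay(dev, 5000);    /* assumed idle delay      */
        pm_runtime_allow(dev);                          /* enable runtime PM policy */
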
3257 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
3261 spin_lock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3262 if (q->nr_pending) { in blk_pre_runtime_suspend()
3264 pm_runtime_mark_last_busy(q->dev); in blk_pre_runtime_suspend()
3266 q->rpm_status = RPM_SUSPENDING; in blk_pre_runtime_suspend()
3268 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3286 void blk_post_runtime_suspend(struct request_queue *q, int err) in blk_post_runtime_suspend() argument
3288 spin_lock_irq(q->queue_lock); in blk_post_runtime_suspend()
3290 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_suspend()
3292 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_suspend()
3293 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_suspend()
3295 spin_unlock_irq(q->queue_lock); in blk_post_runtime_suspend()
3310 void blk_pre_runtime_resume(struct request_queue *q) in blk_pre_runtime_resume() argument
3312 spin_lock_irq(q->queue_lock); in blk_pre_runtime_resume()
3313 q->rpm_status = RPM_RESUMING; in blk_pre_runtime_resume()
3314 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_resume()
3332 void blk_post_runtime_resume(struct request_queue *q, int err) in blk_post_runtime_resume() argument
3334 spin_lock_irq(q->queue_lock); in blk_post_runtime_resume()
3336 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_resume()
3337 __blk_run_queue(q); in blk_post_runtime_resume()
3338 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_resume()
3339 pm_request_autosuspend(q->dev); in blk_post_runtime_resume()
3341 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_resume()
3343 spin_unlock_irq(q->queue_lock); in blk_post_runtime_resume()
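
    The four helpers bracket a driver's runtime-PM callbacks: pre/post around the actual suspend, pre/post around the resume. A hypothetical callback pair under that reading; my_dev_to_queue(), my_hw_suspend() and my_hw_resume() are placeholders:

        static int my_runtime_suspend(struct device *dev)
        {
                struct request_queue *q = my_dev_to_queue(dev); /* hypothetical */
                int err;

                err = blk_pre_runtime_suspend(q);       /* -EBUSY if requests pending */
                if (err)
                        return err;
                err = my_hw_suspend(dev);               /* hypothetical hardware op   */
                blk_post_runtime_suspend(q, err);       /* SUSPENDED, or back to ACTIVE */
                return err;
        }

        static int my_runtime_resume(struct device *dev)
        {
                struct request_queue *q = my_dev_to_queue(dev); /* hypothetical */
                int err;

                blk_pre_runtime_resume(q);              /* RPM_RESUMING               */
                err = my_hw_resume(dev);                /* hypothetical hardware op   */
                blk_post_runtime_resume(q, err);        /* ACTIVE + queue run, or SUSPENDED */
                return err;
        }
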