Lines Matching refs:q
56 struct request_queue *q; /* the queue this rl belongs to */ member
95 struct request_queue *q; member
211 typedef void (request_fn_proc) (struct request_queue *q);
212 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
219 typedef int (lld_busy_fn) (struct request_queue *q);
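These typedefs are the hooks a queue is driven through: ->request_fn for request-based drivers and ->make_request_fn for bio-based ones. Below is a minimal sketch of the bio-based side; the sketch_* names are hypothetical, and the blk_alloc_queue()/blk_queue_make_request() pairing is the conventional wiring rather than anything shown in this listing.

    /* Hypothetical bio-based driver: complete every bio immediately. */
    static blk_qc_t sketch_make_request(struct request_queue *q, struct bio *bio)
    {
            /* ... move data to/from the backing store here ... */
            bio_endio(bio);                 /* bio->bi_error == 0, i.e. success */
            return BLK_QC_T_NONE;           /* nothing for blk_poll() to poll on */
    }

    static struct request_queue *sketch_setup_queue(void)
    {
            struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

            if (!q)
                    return NULL;
            blk_queue_make_request(q, sketch_make_request);
            return q;
    }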
502 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held() argument
504 if (q->queue_lock) in queue_lockdep_assert_held()
505 lockdep_assert_held(q->queue_lock); in queue_lockdep_assert_held()
509 struct request_queue *q) in queue_flag_set_unlocked() argument
511 __set_bit(flag, &q->queue_flags); in queue_flag_set_unlocked()
515 struct request_queue *q) in queue_flag_test_and_clear() argument
517 queue_lockdep_assert_held(q); in queue_flag_test_and_clear()
519 if (test_bit(flag, &q->queue_flags)) { in queue_flag_test_and_clear()
520 __clear_bit(flag, &q->queue_flags); in queue_flag_test_and_clear()
528 struct request_queue *q) in queue_flag_test_and_set() argument
530 queue_lockdep_assert_held(q); in queue_flag_test_and_set()
532 if (!test_bit(flag, &q->queue_flags)) { in queue_flag_test_and_set()
533 __set_bit(flag, &q->queue_flags); in queue_flag_test_and_set()
540 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) in queue_flag_set() argument
542 queue_lockdep_assert_held(q); in queue_flag_set()
543 __set_bit(flag, &q->queue_flags); in queue_flag_set()
547 struct request_queue *q) in queue_flag_clear_unlocked() argument
549 __clear_bit(flag, &q->queue_flags); in queue_flag_clear_unlocked()
552 static inline int queue_in_flight(struct request_queue *q) in queue_in_flight() argument
554 return q->in_flight[0] + q->in_flight[1]; in queue_in_flight()
557 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) in queue_flag_clear() argument
559 queue_lockdep_assert_held(q); in queue_flag_clear()
560 __clear_bit(flag, &q->queue_flags); in queue_flag_clear()
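The queue_flag_* helpers above manipulate bits in q->queue_flags; except for the *_unlocked variants they assert (via queue_lockdep_assert_held()) that q->queue_lock is held. A minimal sketch, assuming a hypothetical driver that marks its device non-rotational after probing it; sketch_mark_queue_nonrot() is illustrative only.

    static void sketch_mark_queue_nonrot(struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            queue_flag_set(QUEUE_FLAG_NONROT, q);           /* no seek penalty */
            queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);     /* don't feed the entropy pool */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }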
563 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) argument
564 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) argument
565 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) argument
566 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) argument
567 #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) argument
568 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) argument
569 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) argument
570 #define blk_queue_noxmerges(q) \ argument
571 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
572 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) argument
573 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) argument
574 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) argument
575 #define blk_queue_stackable(q) \ argument
576 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
577 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) argument
578 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ argument
579 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
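These blk_queue_* predicates are how upper layers and stacking drivers probe a queue's capabilities before using an optional feature. A small sketch, assuming a hypothetical caller that only issues discards when the underlying queue advertises them; sketch_can_discard() is illustrative.

    static bool sketch_can_discard(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            return q && blk_queue_discard(q);
    }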
602 static inline bool queue_is_rq_based(struct request_queue *q) in queue_is_rq_based() argument
604 return q->request_fn || q->mq_ops; in queue_is_rq_based()
607 static inline unsigned int blk_queue_cluster(struct request_queue *q) in blk_queue_cluster() argument
609 return q->limits.cluster; in blk_queue_cluster()
713 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
719 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) in blk_queue_bounce() argument
767 extern void blk_rq_init(struct request_queue *q, struct request *rq);
777 extern int blk_lld_busy(struct request_queue *q);
783 extern int blk_insert_cloned_request(struct request_queue *q,
797 extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
798 extern void blk_queue_exit(struct request_queue *q);
799 extern void blk_start_queue(struct request_queue *q);
800 extern void blk_start_queue_async(struct request_queue *q);
801 extern void blk_stop_queue(struct request_queue *q);
802 extern void blk_sync_queue(struct request_queue *q);
803 extern void __blk_stop_queue(struct request_queue *q);
804 extern void __blk_run_queue(struct request_queue *q);
805 extern void __blk_run_queue_uncond(struct request_queue *q);
807 extern void blk_run_queue_async(struct request_queue *q);
821 bool blk_poll(struct request_queue *q, blk_qc_t cookie);
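blk_stop_queue() and blk_start_queue() let a request_fn-based driver throttle dispatch; both expect q->queue_lock to be held, and ->request_fn is already invoked with the lock held. A hedged sketch of the usual pattern; sketch_request_fn(), sketch_completion() and device_is_busy() are hypothetical, while blk_fetch_request()/blk_requeue_request() are the standard helpers for pulling a request and putting it back.

    static void sketch_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    if (device_is_busy()) {                 /* hypothetical check */
                            blk_requeue_request(q, rq);     /* put it back for later */
                            blk_stop_queue(q);              /* stop ->request_fn calls */
                            break;
                    }
                    /* ... hand rq to the hardware, complete it from the IRQ path ... */
            }
    }

    static void sketch_completion(struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            blk_start_queue(q);     /* clears QUEUE_FLAG_STOPPED and reruns the queue */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }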
863 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors() argument
867 return min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blk_queue_get_max_sectors()
870 return q->limits.max_write_same_sectors; in blk_queue_get_max_sectors()
872 return q->limits.max_sectors; in blk_queue_get_max_sectors()
879 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset() argument
882 if (!q->limits.chunk_sectors) in blk_max_size_offset()
883 return q->limits.max_sectors; in blk_max_size_offset()
885 return q->limits.chunk_sectors - in blk_max_size_offset()
886 (offset & (q->limits.chunk_sectors - 1)); in blk_max_size_offset()
891 struct request_queue *q = rq->q; in blk_rq_get_max_sectors() local
894 return q->limits.max_hw_sectors; in blk_rq_get_max_sectors()
896 if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) in blk_rq_get_max_sectors()
897 return blk_queue_get_max_sectors(q, rq->cmd_flags); in blk_rq_get_max_sectors()
899 return min(blk_max_size_offset(q, blk_rq_pos(rq)), in blk_rq_get_max_sectors()
900 blk_queue_get_max_sectors(q, rq->cmd_flags)); in blk_rq_get_max_sectors()
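blk_max_size_offset() relies on chunk_sectors being a power of two, so the distance to the next chunk boundary can be taken with a mask rather than a division. A worked example, assuming a hypothetical device with chunk_sectors = 256 and a request starting at sector 700:

    700 & (256 - 1) = 188    sectors already consumed in the current chunk
    256 - 188       =  68    sectors left before the chunk boundary

blk_rq_get_max_sectors() then takes the minimum of that value and blk_queue_get_max_sectors(), so a request is never grown across a chunk boundary.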
917 extern struct request *blk_peek_request(struct request_queue *q);
919 extern struct request *blk_fetch_request(struct request_queue *q);
968 extern void blk_queue_max_discard_sectors(struct request_queue *q,
970 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
974 extern void blk_queue_alignment_offset(struct request_queue *q,
977 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
979 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
991 extern int blk_queue_dma_drain(struct request_queue *q,
994 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1004 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
1005 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
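Drivers normally apply these limit setters once at probe time. A sketch, assuming a hypothetical device with a volatile write cache, 4 KiB preferred granularity and unbounded discard support; sketch_set_limits() and the particular values are illustrative only.

    static void sketch_set_limits(struct request_queue *q)
    {
            blk_queue_io_min(q, 4096);                      /* preferred minimum I/O, bytes */
            blk_queue_io_opt(q, 128 * 1024);                /* optimal I/O size, bytes */
            blk_queue_max_discard_sectors(q, UINT_MAX);     /* no driver-side discard limit */
            blk_queue_flush(q, REQ_FLUSH | REQ_FUA);        /* volatile cache: honour flush/FUA */
    }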
1022 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1023 extern int blk_pre_runtime_suspend(struct request_queue *q);
1024 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1025 extern void blk_pre_runtime_resume(struct request_queue *q);
1026 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1028 static inline void blk_pm_runtime_init(struct request_queue *q, in blk_pm_runtime_init() argument
1030 static inline int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
1034 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} in blk_post_runtime_suspend() argument
1035 static inline void blk_pre_runtime_resume(struct request_queue *q) {} in blk_pre_runtime_resume() argument
1036 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} in blk_post_runtime_resume() argument
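blk_pm_runtime_init() ties a request queue to its device for block-layer runtime PM; the driver's runtime callbacks then bracket the low-level suspend/resume work with the blk_{pre,post}_runtime_* helpers so the block layer can veto a suspend while requests are in flight. A sketch of that pattern; dev_to_queue(), sketch_hw_suspend() and sketch_hw_resume() are hypothetical stand-ins.

    static int sketch_runtime_suspend(struct device *dev)
    {
            struct request_queue *q = dev_to_queue(dev);    /* hypothetical lookup */
            int err;

            err = blk_pre_runtime_suspend(q);               /* fails if requests are pending */
            if (err)
                    return err;
            err = sketch_hw_suspend(dev);                   /* hypothetical hardware work */
            blk_post_runtime_suspend(q, err);
            return err;
    }

    static int sketch_runtime_resume(struct device *dev)
    {
            struct request_queue *q = dev_to_queue(dev);    /* hypothetical lookup */
            int err;

            blk_pre_runtime_resume(q);
            err = sketch_hw_resume(dev);                    /* hypothetical hardware work */
            blk_post_runtime_resume(q, err);                /* restarts the queue on success */
            return err;
    }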
1155 static inline unsigned long queue_bounce_pfn(struct request_queue *q) in queue_bounce_pfn() argument
1157 return q->limits.bounce_pfn; in queue_bounce_pfn()
1160 static inline unsigned long queue_segment_boundary(struct request_queue *q) in queue_segment_boundary() argument
1162 return q->limits.seg_boundary_mask; in queue_segment_boundary()
1165 static inline unsigned long queue_virt_boundary(struct request_queue *q) in queue_virt_boundary() argument
1167 return q->limits.virt_boundary_mask; in queue_virt_boundary()
1170 static inline unsigned int queue_max_sectors(struct request_queue *q) in queue_max_sectors() argument
1172 return q->limits.max_sectors; in queue_max_sectors()
1175 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) in queue_max_hw_sectors() argument
1177 return q->limits.max_hw_sectors; in queue_max_hw_sectors()
1180 static inline unsigned short queue_max_segments(struct request_queue *q) in queue_max_segments() argument
1182 return q->limits.max_segments; in queue_max_segments()
1185 static inline unsigned int queue_max_segment_size(struct request_queue *q) in queue_max_segment_size() argument
1187 return q->limits.max_segment_size; in queue_max_segment_size()
1190 static inline unsigned short queue_logical_block_size(struct request_queue *q) in queue_logical_block_size() argument
1194 if (q && q->limits.logical_block_size) in queue_logical_block_size()
1195 retval = q->limits.logical_block_size; in queue_logical_block_size()
1205 static inline unsigned int queue_physical_block_size(struct request_queue *q) in queue_physical_block_size() argument
1207 return q->limits.physical_block_size; in queue_physical_block_size()
1215 static inline unsigned int queue_io_min(struct request_queue *q) in queue_io_min() argument
1217 return q->limits.io_min; in queue_io_min()
1225 static inline unsigned int queue_io_opt(struct request_queue *q) in queue_io_opt() argument
1227 return q->limits.io_opt; in queue_io_opt()
1235 static inline int queue_alignment_offset(struct request_queue *q) in queue_alignment_offset() argument
1237 if (q->limits.misaligned) in queue_alignment_offset()
1240 return q->limits.alignment_offset; in queue_alignment_offset()
1253 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset() local
1255 if (q->limits.misaligned) in bdev_alignment_offset()
1261 return q->limits.alignment_offset; in bdev_alignment_offset()
1264 static inline int queue_discard_alignment(struct request_queue *q) in queue_discard_alignment() argument
1266 if (q->limits.discard_misaligned) in queue_discard_alignment()
1269 return q->limits.discard_alignment; in queue_discard_alignment()
1297 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment() local
1302 return q->limits.discard_alignment; in bdev_discard_alignment()
1305 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) in queue_discard_zeroes_data() argument
1307 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) in queue_discard_zeroes_data()
1320 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same() local
1322 if (q) in bdev_write_same()
1323 return q->limits.max_write_same_sectors; in bdev_write_same()
1328 static inline int queue_dma_alignment(struct request_queue *q) in queue_dma_alignment() argument
1330 return q ? q->dma_alignment : 511; in queue_dma_alignment()
1333 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned() argument
1336 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; in blk_rq_aligned()
1356 static inline bool queue_flush_queueable(struct request_queue *q) in queue_flush_queueable() argument
1358 return !q->flush_not_queueable; in queue_flush_queueable()
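Most of the inlines above are thin accessors for q->limits. One common consumer is passthrough I/O, which has to respect dma_alignment and dma_pad_mask before mapping a user buffer directly. A sketch; sketch_buffer_ok() is hypothetical.

    /* Can this user buffer be mapped directly, or must it be bounced? */
    static bool sketch_buffer_ok(struct request_queue *q,
                                 unsigned long addr, unsigned int len)
    {
            if (!blk_rq_aligned(q, addr, len))
                    return false;                           /* violates dma_alignment/dma_pad_mask */
            return (len >> 9) <= queue_max_hw_sectors(q);   /* fits in one request */
    }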
1370 static inline bool __bvec_gap_to_prev(struct request_queue *q, in __bvec_gap_to_prev() argument
1374 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); in __bvec_gap_to_prev()
1381 static inline bool bvec_gap_to_prev(struct request_queue *q, in bvec_gap_to_prev() argument
1384 if (!queue_virt_boundary(q)) in bvec_gap_to_prev()
1386 return __bvec_gap_to_prev(q, bprv, offset); in bvec_gap_to_prev()
1389 static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, in bio_will_gap() argument
1392 if (bio_has_data(prev) && queue_virt_boundary(q)) { in bio_will_gap()
1398 return __bvec_gap_to_prev(q, &pb, nb.bv_offset); in bio_will_gap()
1406 return bio_will_gap(req->q, req->biotail, bio); in req_gap_back_merge()
1411 return bio_will_gap(req->q, bio, req->bio); in req_gap_front_merge()
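A gap check worked through, assuming a hypothetical queue whose virt_boundary_mask is 0xfff (a 4 KiB boundary, as NVMe uses on 4 KiB pages): if the previous bio_vec ends at offset 0x800 within its page, (0x800 & 0xfff) != 0, so appending a further vector is reported as a gap and the two cannot share one hardware segment; if the previous vector ends exactly at 0x1000, the mask yields 0 and a following vector that itself starts at offset 0 does not gap. bio_will_gap() applies the same test to the last bio_vec of one bio and the first bio_vec of the next, which is what req_gap_back_merge()/req_gap_front_merge() use to veto merges.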
1524 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments() argument
1527 q->limits.max_integrity_segments = segs; in blk_queue_max_integrity_segments()
1531 queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments() argument
1533 return q->limits.max_integrity_segments; in queue_max_integrity_segments()
1542 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], in integrity_req_gap_back_merge()
1552 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], in integrity_req_gap_front_merge()
1567 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg() argument
1572 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg() argument
1597 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments() argument
1601 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments() argument
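A sketch of how a hypothetical protection-information-capable HBA might use these helpers: advertise its per-command PI segment limit at probe time, then honour it when building the integrity scatterlist. sketch_init_integrity() and sketch_map_integrity() are illustrative; the non-stub declarations of blk_rq_count_integrity_sg()/blk_rq_map_integrity_sg() live under CONFIG_BLK_DEV_INTEGRITY.

    static void sketch_init_integrity(struct request_queue *q)
    {
            blk_queue_max_integrity_segments(q, 1);         /* one PI segment per command */
    }

    static int sketch_map_integrity(struct request_queue *q, struct request *rq,
                                    struct scatterlist *sg)
    {
            if (blk_rq_count_integrity_sg(q, rq->bio) > queue_max_integrity_segments(q))
                    return -EINVAL;                         /* more PI segments than supported */
            return blk_rq_map_integrity_sg(q, rq->bio, sg); /* number of SG entries mapped */
    }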