Lines matching refs: q. Each entry lists the source line number, the matching source line, and the enclosing function; a trailing "argument" or "local" marks whether q is a function parameter or a local variable at that site.
12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, in __blk_recalc_rq_segments() argument
36 cluster = blk_queue_cluster(q); in __blk_recalc_rq_segments()
54 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); in __blk_recalc_rq_segments()
57 > queue_max_segment_size(q)) in __blk_recalc_rq_segments()
61 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) in __blk_recalc_rq_segments()
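
The cluster above is the core of __blk_recalc_rq_segments(): it walks every bio_vec in the request and decides whether the current vector can be folded into the physical segment that ended with the previous one, or must open a new segment. Below is a minimal sketch of that decision under the queue limits named in the listing; can_extend_segment() is an illustrative helper, not a kernel symbol, and the sketch ignores the highmem/bounce-pfn "high page" case the real loop also handles (line 54).

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hedged sketch: true when bv can extend the segment that currently
     * ends with bvprv, given the queue's clustering limits. */
    static bool can_extend_segment(struct request_queue *q,
                                   struct bio_vec *bvprv, struct bio_vec *bv,
                                   unsigned int seg_size)
    {
            if (!blk_queue_cluster(q))              /* clustering off: one bvec per segment */
                    return false;
            if (seg_size + bv->bv_len > queue_max_segment_size(q))
                    return false;                   /* would exceed the per-segment size cap */
            if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))  /* not physically contiguous */
                    return false;
            if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) /* would straddle the DMA boundary mask */
                    return false;
            return true;
    }
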
92 &rq->q->queue_flags); in blk_recalc_rq_segments()
94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
98 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
108 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && in blk_recount_segments()
109 (seg_cnt < queue_max_segments(q))) in blk_recount_segments()
115 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); in blk_recount_segments()
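
blk_recount_segments() is the slow path behind the cached bio->bi_phys_segments count: when QUEUE_FLAG_NO_SG_MERGE is set and the bvec count already fits under queue_max_segments() (lines 108-109), it can use that count directly, otherwise it falls back to __blk_recalc_rq_segments() (line 115). Callers normally reach it lazily through bio_phys_segments() (visible at line 290 further down). The sketch below paraphrases that caching pattern; segment_count_cached() is an illustrative name, not a kernel symbol.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hedged sketch: recount only when the cached value is stale, then
     * reuse bio->bi_phys_segments everywhere else. */
    static unsigned int segment_count_cached(struct request_queue *q, struct bio *bio)
    {
            if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                    blk_recount_segments(q, bio);   /* recomputes and sets BIO_SEG_VALID */
            return bio->bi_phys_segments;
    }
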
123 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, in blk_phys_contig_segment() argument
129 if (!blk_queue_cluster(q)) in blk_phys_contig_segment()
133 queue_max_segment_size(q)) in blk_phys_contig_segment()
152 if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv)) in blk_phys_contig_segment()
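
blk_phys_contig_segment() asks whether the last bio_vec of one bio and the first bio_vec of the following bio could share a single physical segment; ll_merge_requests_fn() uses the answer (line 393) to avoid double-counting the segment at the seam. The BIOVEC_SEG_BOUNDARY() test it leans on reduces to a mask comparison against queue_segment_boundary(); the helper below is a rough hand-written equivalent for illustration, not the kernel macro.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hedged sketch: two bvecs can share a segment only if they are
     * physically adjacent and stay inside one boundary window. */
    static bool bvecs_share_segment(struct request_queue *q,
                                    struct bio_vec *a, struct bio_vec *b)
    {
            unsigned long mask = queue_segment_boundary(q);
            phys_addr_t start = bvec_to_phys(a);
            phys_addr_t end = bvec_to_phys(b) + b->bv_len - 1;

            if (bvec_to_phys(a) + a->bv_len != bvec_to_phys(b))
                    return false;                   /* not contiguous in physical memory */
            return (start | mask) == (end | mask);  /* same DMA boundary window */
    }
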
159 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg() argument
167 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg()
172 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) in __blk_segment_map_sg()
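
__blk_segment_map_sg() applies the same clustering rules while building a scatterlist: either grow the current scatterlist entry or start a fresh one with sg_set_page(). A condensed sketch of that branch follows; map_one_bvec() is an illustrative wrapper, and the real helper additionally unmarks the previous end entry before advancing.

    #include <linux/blkdev.h>
    #include <linux/bio.h>
    #include <linux/scatterlist.h>

    /* Hedged sketch: fold bvec into *sg when clustering permits,
     * otherwise advance to a fresh scatterlist entry. */
    static void map_one_bvec(struct request_queue *q, struct bio_vec *bvec,
                             struct bio_vec *bvprv, struct scatterlist *sglist,
                             struct scatterlist **sg, int *nsegs)
    {
            unsigned int nbytes = bvec->bv_len;

            if (*sg && blk_queue_cluster(q) &&
                (*sg)->length + nbytes <= queue_max_segment_size(q) &&
                BIOVEC_PHYS_MERGEABLE(bvprv, bvec) &&
                BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) {
                    (*sg)->length += nbytes;        /* extend the current entry */
            } else {
                    *sg = *sg ? sg_next(*sg) : sglist;
                    sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                    (*nsegs)++;                     /* one more hardware segment */
            }
            *bvprv = *bvec;
    }
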
201 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
210 cluster = blk_queue_cluster(q); in __blk_bios_map_sg()
237 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, in __blk_bios_map_sg()
247 int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg() argument
254 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
257 (blk_rq_bytes(rq) & q->dma_pad_mask)) { in blk_rq_map_sg()
259 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in blk_rq_map_sg()
265 if (q->dma_drain_size && q->dma_drain_needed(rq)) { in blk_rq_map_sg()
267 memset(q->dma_drain_buffer, 0, q->dma_drain_size); in blk_rq_map_sg()
271 sg_set_page(sg, virt_to_page(q->dma_drain_buffer), in blk_rq_map_sg()
272 q->dma_drain_size, in blk_rq_map_sg()
273 ((unsigned long)q->dma_drain_buffer) & in blk_rq_map_sg()
276 rq->extra_len += q->dma_drain_size; in blk_rq_map_sg()
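
blk_rq_map_sg() is the exported entry point drivers call to flatten a request into a scatterlist; after the bios are mapped it may also append alignment padding (q->dma_pad_mask, lines 257-259) and a drain-buffer entry (lines 265-276). A typical caller looks roughly like the sketch below, assuming the scatterlist was pre-allocated with room for queue_max_segments() entries; my_prep_rq() and the error value are illustrative.

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* Hedged sketch of a driver-side caller: build the scatterlist for rq,
     * then hand it to dma_map_sg() or the controller from here. */
    static int my_prep_rq(struct request_queue *q, struct request *rq,
                          struct scatterlist *sgl)
    {
            int nents;

            sg_init_table(sgl, queue_max_segments(q));  /* sgl must hold max segments */
            nents = blk_rq_map_sg(q, rq, sgl);          /* number of entries filled in */
            if (!nents)
                    return -EIO;

            return nents;
    }
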
286 static inline int ll_new_hw_segment(struct request_queue *q, in ll_new_hw_segment() argument
290 int nr_phys_segs = bio_phys_segments(q, bio); in ll_new_hw_segment()
292 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
295 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
307 if (req == q->last_merge) in ll_new_hw_segment()
308 q->last_merge = NULL; in ll_new_hw_segment()
312 int ll_back_merge_fn(struct request_queue *q, struct request *req, in ll_back_merge_fn() argument
318 if (req == q->last_merge) in ll_back_merge_fn()
319 q->last_merge = NULL; in ll_back_merge_fn()
323 blk_recount_segments(q, req->biotail); in ll_back_merge_fn()
325 blk_recount_segments(q, bio); in ll_back_merge_fn()
327 return ll_new_hw_segment(q, req, bio); in ll_back_merge_fn()
330 int ll_front_merge_fn(struct request_queue *q, struct request *req, in ll_front_merge_fn() argument
336 if (req == q->last_merge) in ll_front_merge_fn()
337 q->last_merge = NULL; in ll_front_merge_fn()
341 blk_recount_segments(q, bio); in ll_front_merge_fn()
343 blk_recount_segments(q, req->bio); in ll_front_merge_fn()
345 return ll_new_hw_segment(q, req, bio); in ll_front_merge_fn()
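
ll_back_merge_fn() and ll_front_merge_fn() differ only in which end of the request the bio would be attached to; both recount stale segment counts, reject a merge that would exceed the request's byte budget, and then defer to ll_new_hw_segment() for the segment-count and integrity checks (lines 292 and 295). The opening size gate is sketched below, wrapped in an illustrative helper so it stands alone.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hedged sketch: refuse the merge when the combined transfer would
     * exceed the request's sector limit, and forget the merge hint. */
    static int merge_fits_size_limit(struct request_queue *q,
                                     struct request *req, struct bio *bio)
    {
            if (blk_rq_sectors(req) + bio_sectors(bio) >
                blk_rq_get_max_sectors(req)) {
                    req->cmd_flags |= REQ_NOMERGE;      /* stop retrying this request */
                    if (req == q->last_merge)
                            q->last_merge = NULL;       /* drop the cached merge hint */
                    return 0;
            }
            return 1;
    }
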
354 struct request_queue *q = req->q; in req_no_special_merge() local
356 return !q->mq_ops && req->special; in req_no_special_merge()
367 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
381 if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) && in ll_merge_requests_fn()
393 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { in ll_merge_requests_fn()
401 if (total_phys_segments > queue_max_segments(q)) in ll_merge_requests_fn()
404 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
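
ll_merge_requests_fn() does the equivalent accounting when two whole requests are glued together: sum both segment counts, subtract one if blk_phys_contig_segment() reports that the seam collapses into a single physical segment (line 393), and refuse the merge if the total exceeds queue_max_segments() (line 401). Condensed to its core below; blk_phys_contig_segment() is static to this file, so the sketch takes its verdict as a parameter and skips the bi_seg_front_size/back_size bookkeeping the real function also updates.

    #include <linux/blkdev.h>

    /* Hedged sketch of the segment accounting across a request merge. */
    static bool merged_segments_fit(struct request_queue *q,
                                    struct request *req, struct request *next,
                                    bool seam_is_contiguous)
    {
            unsigned int total = req->nr_phys_segments + next->nr_phys_segments;

            if (seam_is_contiguous)
                    total--;                        /* biotail/bio pair share one segment */

            return total <= queue_max_segments(q);
    }
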
462 static int attempt_merge(struct request_queue *q, struct request *req, in attempt_merge() argument
492 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
522 elv_merge_requests(q, req, next); in attempt_merge()
535 __blk_put_request(q, next); in attempt_merge()
539 int attempt_back_merge(struct request_queue *q, struct request *rq) in attempt_back_merge() argument
541 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
544 return attempt_merge(q, rq, next); in attempt_back_merge()
549 int attempt_front_merge(struct request_queue *q, struct request *rq) in attempt_front_merge() argument
551 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
554 return attempt_merge(q, prev, rq); in attempt_front_merge()
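
attempt_back_merge() and attempt_front_merge() just ask the elevator for the request that sorts immediately after (or before) rq and hand the pair to attempt_merge(), which verifies, among other things, that the two are truly back to back on disk. The sketch below pulls that adjacency test forward for illustration; in the real code the position check lives inside attempt_merge(), and the helper name here is not a kernel symbol.

    #include <linux/blkdev.h>
    #include <linux/elevator.h>

    /* Hedged sketch: a back merge only makes sense when rq ends exactly
     * where the elevator's "latter" request begins. */
    static bool back_merge_candidates_adjacent(struct request_queue *q,
                                               struct request *rq)
    {
            struct request *next = elv_latter_request(q, rq);

            return next &&
                   blk_rq_pos(rq) + blk_rq_sectors(rq) == blk_rq_pos(next);
    }
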
559 int blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
562 return attempt_merge(q, rq, next); in blk_attempt_req_merge()
567 struct request_queue *q = rq->q; in blk_rq_merge_ok() local
584 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { in blk_rq_merge_ok()
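
blk_rq_merge_ok() is the cheap front-line filter consulted before any of the ll_*_merge_fn() work happens: the request must still be mergeable, the bio must go in the same direction to the same disk, integrity profiles must agree (line 584), and on QUEUE_FLAG_SG_GAPS queues the bvecs at the seam must not leave a gap (line 592). A reduced sketch of those gates, omitting the discard/write-same and no-special-merge cases the real function also checks; the helper name is illustrative.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hedged sketch of the basic mergeability gates. */
    static bool rq_and_bio_basically_mergeable(struct request *rq, struct bio *bio)
    {
            if (!rq_mergeable(rq))                          /* e.g. REQ_NOMERGE already set */
                    return false;
            if (bio_data_dir(bio) != rq_data_dir(rq))       /* reads only merge with reads */
                    return false;
            if (rq->rq_disk != bio->bi_bdev->bd_disk)       /* must target the same disk */
                    return false;
            if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                    return false;                           /* integrity profiles must agree */
            return true;
    }
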