Lines matching references to q (each entry gives the source line number, the matching line, and the containing function, noting whether q is an argument or a local there):
12 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split() argument
25 granularity = max(q->limits.discard_granularity >> 9, 1U); in blk_bio_discard_split()
27 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blk_bio_discard_split()
44 alignment = (q->limits.discard_alignment >> 9) % granularity; in blk_bio_discard_split()
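
The three lines above carry the whole discard-split calculation: granularity and alignment are converted to 512-byte sectors, and the split point is pulled back so the remainder starts on a granularity boundary. A minimal userspace sketch of that arithmetic, with made-up limit values standing in for q->limits (discard_split_sectors() is illustrative, not a kernel helper):

#include <stdio.h>

/* Stand-ins for the queue limits read by blk_bio_discard_split(); values are made up. */
static unsigned int discard_granularity = 1u << 20;    /* 1 MiB, in bytes */
static unsigned int discard_alignment   = 0;           /* bytes */
static unsigned int max_discard_sectors = 8u << 11;    /* 8 MiB in 512-byte sectors */

/* How many sectors of a discard at 'sector' spanning 'nr_sectors' go into the
 * first split, so that the remainder starts on a granularity boundary. */
static unsigned int discard_split_sectors(unsigned long long sector,
                                          unsigned int nr_sectors)
{
        unsigned int granularity = discard_granularity >> 9;
        unsigned int max_sectors = max_discard_sectors;
        unsigned int alignment, split, rem;

        if (!granularity)
                granularity = 1;
        max_sectors -= max_sectors % granularity;       /* whole granules per split */

        if (nr_sectors <= max_sectors)
                return nr_sectors;                      /* no split needed */

        split = max_sectors;
        alignment = (discard_alignment >> 9) % granularity;
        rem = (unsigned int)((sector + split - alignment) % granularity);
        if (split > rem)
                split -= rem;                           /* end on a granule boundary */
        return split;
}

int main(void)
{
        /* prints 16377: the remainder then starts at sector 16384, a granule boundary */
        printf("%u\n", discard_split_sectors(7, 100000));
        return 0;
}
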
55 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split() argument
62 if (!q->limits.max_write_same_sectors) in blk_bio_write_same_split()
65 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
68 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
71 static inline unsigned get_max_io_size(struct request_queue *q, in get_max_io_size() argument
74 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in get_max_io_size()
75 unsigned mask = queue_logical_block_size(q) - 1; in get_max_io_size()
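
get_max_io_size() takes the per-offset limit from blk_max_size_offset() and rounds it down so a split never ends in the middle of a logical block. A standalone sketch of that masking, assuming a power-of-two logical block size (max_io_sectors() and the values in main() are examples):

#include <stdio.h>

/* Round a per-offset limit (in 512-byte sectors) down to whole logical blocks,
 * as get_max_io_size() does; 'limit_sectors' models blk_max_size_offset(). */
static unsigned int max_io_sectors(unsigned int limit_sectors,
                                   unsigned int logical_block_size)
{
        unsigned int mask = logical_block_size - 1;     /* power-of-two block size */

        return limit_sectors & ~(mask >> 9);            /* (mask >> 9): mask in sectors */
}

int main(void)
{
        /* 4 KiB logical blocks: 1003 sectors rounds down to 1000 (a multiple of 8) */
        printf("%u\n", max_io_sectors(1003, 4096));
        return 0;
}
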
83 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split() argument
94 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
101 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) in blk_bio_segment_split()
109 if (nsegs < queue_max_segments(q) && in blk_bio_segment_split()
119 if (bvprvp && blk_queue_cluster(q)) { in blk_bio_segment_split()
120 if (seg_size + bv.bv_len > queue_max_segment_size(q)) in blk_bio_segment_split()
124 if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) in blk_bio_segment_split()
137 if (nsegs == queue_max_segments(q)) in blk_bio_segment_split()
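
The checks listed above are the heart of blk_bio_segment_split(): stop when the sector budget or the segment budget runs out, and fold a vector into the previous segment only if the queue clusters segments, the combined length stays under the per-segment cap, and no segment boundary is crossed. The sketch below is a simplified, self-contained model of that loop (struct vec, vecs_before_split() and the limits in main() are illustrative, not kernel types, and the gap and boundary checks are reduced to plain physical contiguity):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins; not kernel types. */
struct vec {
        unsigned long long phys;   /* physical address of the buffer */
        unsigned int len;          /* length in bytes */
};

/* Count how many of the 'nr' vectors fit in one request before a split is
 * needed, given limits modelled on the checks in blk_bio_segment_split(). */
static int vecs_before_split(const struct vec *v, int nr,
                             unsigned int max_segments,
                             unsigned int max_segment_size,
                             unsigned int max_sectors)
{
        unsigned int nsegs = 0, sectors = 0, seg_size = 0;
        bool have_prev = false;
        struct vec prev = { 0, 0 };
        int i;

        for (i = 0; i < nr; i++) {
                if (sectors + (v[i].len >> 9) > max_sectors)
                        return i;                          /* split: sector budget hit */

                if (have_prev &&
                    prev.phys + prev.len == v[i].phys &&   /* physically contiguous */
                    seg_size + v[i].len <= max_segment_size) {
                        seg_size += v[i].len;              /* merge into current segment */
                } else {
                        if (nsegs == max_segments)
                                return i;                  /* split: segment budget hit */
                        nsegs++;
                        seg_size = v[i].len;
                }
                prev = v[i];
                have_prev = true;
                sectors += v[i].len >> 9;
        }
        return nr;                                         /* whole bio fits, no split */
}

int main(void)
{
        struct vec v[] = {
                { 0x1000, 4096 }, { 0x2000, 4096 },        /* contiguous pair */
                { 0x9000, 4096 },                          /* gap: new segment */
        };
        /* prints 2: the contiguous pair collapses into one segment; the third
         * vector would need a second segment, so the bio splits before it */
        printf("%d\n", vecs_before_split(v, 3, 1, 65536, 256));
        return 0;
}
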
167 void blk_queue_split(struct request_queue *q, struct bio **bio, in blk_queue_split() argument
174 split = blk_bio_discard_split(q, *bio, bs, &nsegs); in blk_queue_split()
176 split = blk_bio_write_same_split(q, *bio, bs, &nsegs); in blk_queue_split()
178 split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs); in blk_queue_split()
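
blk_queue_split() is the driver-facing entry point; the three calls above dispatch to the discard, write-same, or general splitter, the last one drawing from the queue's own q->bio_split pool. A hedged sketch of how a bio-based driver of this kernel generation calls it at the top of its make_request function (my_make_request is hypothetical; the signature and q->bio_split come straight from the lines above):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical make_request function for a bio-based driver. */
static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
{
        /* Split first if the bio exceeds the queue limits: 'bio' is replaced by
         * the front piece and the remainder is chained and resubmitted by the core. */
        blk_queue_split(q, &bio, q->bio_split);

        /* ... handle 'bio', which now fits within q's limits ... */

        bio_endio(bio);
        return BLK_QC_T_NONE;
}
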
196 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, in __blk_recalc_rq_segments() argument
220 cluster = blk_queue_cluster(q); in __blk_recalc_rq_segments()
234 > queue_max_segment_size(q)) in __blk_recalc_rq_segments()
238 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) in __blk_recalc_rq_segments()
269 &rq->q->queue_flags); in blk_recalc_rq_segments()
271 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
275 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
285 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && in blk_recount_segments()
286 (seg_cnt < queue_max_segments(q))) in blk_recount_segments()
292 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); in blk_recount_segments()
300 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, in blk_phys_contig_segment() argument
306 if (!blk_queue_cluster(q)) in blk_phys_contig_segment()
310 queue_max_segment_size(q)) in blk_phys_contig_segment()
329 if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv)) in blk_phys_contig_segment()
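
blk_phys_contig_segment() decides whether the last vector of one bio and the first vector of the next can be handed to the hardware as a single segment: they must be physically contiguous, fit under the segment size cap, and not straddle the DMA segment boundary tested by BIOVEC_SEG_BOUNDARY. A standalone model of those checks (addresses, lengths and the 64 KiB boundary in main() are example values):

#include <stdio.h>
#include <stdbool.h>

/* True if a combined buffer [start, end) does not cross the DMA segment
 * boundary: the first and last byte must fall inside the same
 * (boundary_mask + 1)-sized window, mirroring __BIO_SEG_BOUNDARY(). */
static bool within_seg_boundary(unsigned long long start,
                                unsigned long long end,
                                unsigned long long boundary_mask)
{
        return (start | boundary_mask) == ((end - 1) | boundary_mask);
}

/* Can the two buffers be handed to the device as one physical segment? */
static bool phys_contig_segment(unsigned long long a_phys, unsigned int a_len,
                                unsigned long long b_phys, unsigned int b_len,
                                unsigned int max_segment_size,
                                unsigned long long boundary_mask)
{
        if (a_phys + a_len != b_phys)                  /* must be physically contiguous */
                return false;
        if (a_len + b_len > max_segment_size)          /* combined size within the cap */
                return false;
        return within_seg_boundary(a_phys, b_phys + b_len, boundary_mask);
}

int main(void)
{
        /* 64 KiB boundary (mask 0xffff): two 4 KiB buffers meeting at 0x10000
         * are contiguous but cross the boundary, so this prints 0 */
        printf("%d\n", phys_contig_segment(0xf000, 4096, 0x10000, 4096,
                                           65536, 0xffff));
        return 0;
}
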
336 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg() argument
344 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg()
349 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) in __blk_segment_map_sg()
378 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
387 cluster = blk_queue_cluster(q); in __blk_bios_map_sg()
414 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, in __blk_bios_map_sg()
424 int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg() argument
431 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
434 (blk_rq_bytes(rq) & q->dma_pad_mask)) { in blk_rq_map_sg()
436 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in blk_rq_map_sg()
442 if (q->dma_drain_size && q->dma_drain_needed(rq)) { in blk_rq_map_sg()
444 memset(q->dma_drain_buffer, 0, q->dma_drain_size); in blk_rq_map_sg()
448 sg_set_page(sg, virt_to_page(q->dma_drain_buffer), in blk_rq_map_sg()
449 q->dma_drain_size, in blk_rq_map_sg()
450 ((unsigned long)q->dma_drain_buffer) & in blk_rq_map_sg()
453 rq->extra_len += q->dma_drain_size; in blk_rq_map_sg()
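
blk_rq_map_sg() is what request-based drivers call to turn a request into a scatterlist; the lines above also show the core appending DMA padding (dma_pad_mask) and an optional drain buffer after the data. A hedged sketch of typical driver usage around it (my_map_request, MY_MAX_SEGS and the error handling are illustrative; blk_rq_map_sg()'s signature matches the listing):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#define MY_MAX_SEGS 128   /* illustrative: whatever the driver set via blk_queue_max_segments() */

/* Build and DMA-map the scatterlist for one request. 'dev' and 'sgl' would
 * normally live in per-command driver state. */
static int my_map_request(struct request_queue *q, struct request *rq,
                          struct device *dev, struct scatterlist *sgl)
{
        int nents, mapped;

        sg_init_table(sgl, MY_MAX_SEGS);

        /* Collapses physically contiguous bvecs; may append pad/drain entries. */
        nents = blk_rq_map_sg(q, rq, sgl);
        if (nents <= 0)
                return -EIO;

        mapped = dma_map_sg(dev, sgl, nents,
                            rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!mapped)
                return -EIO;

        return mapped;   /* number of DMA segments to program into the hardware */
}
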
469 static inline int ll_new_hw_segment(struct request_queue *q, in ll_new_hw_segment() argument
473 int nr_phys_segs = bio_phys_segments(q, bio); in ll_new_hw_segment()
475 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
478 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
490 if (req == q->last_merge) in ll_new_hw_segment()
491 q->last_merge = NULL; in ll_new_hw_segment()
495 int ll_back_merge_fn(struct request_queue *q, struct request *req, in ll_back_merge_fn() argument
506 if (req == q->last_merge) in ll_back_merge_fn()
507 q->last_merge = NULL; in ll_back_merge_fn()
511 blk_recount_segments(q, req->biotail); in ll_back_merge_fn()
513 blk_recount_segments(q, bio); in ll_back_merge_fn()
515 return ll_new_hw_segment(q, req, bio); in ll_back_merge_fn()
518 int ll_front_merge_fn(struct request_queue *q, struct request *req, in ll_front_merge_fn() argument
530 if (req == q->last_merge) in ll_front_merge_fn()
531 q->last_merge = NULL; in ll_front_merge_fn()
535 blk_recount_segments(q, bio); in ll_front_merge_fn()
537 blk_recount_segments(q, req->bio); in ll_front_merge_fn()
539 return ll_new_hw_segment(q, req, bio); in ll_front_merge_fn()
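
ll_back_merge_fn() and ll_front_merge_fn() apply the same two gates before accepting a bio into an existing request: a sector budget checked up front and a physical-segment budget checked in ll_new_hw_segment(); when either fails the request is flagged non-mergeable and dropped as q->last_merge. A tiny standalone model of the two gates (the names are illustrative, not kernel helpers):

#include <stdio.h>
#include <stdbool.h>

/* Model of the two merge gates: the combined request may exceed neither the
 * per-request sector limit nor the queue's physical segment limit. */
static bool bio_may_merge(unsigned int req_sectors, unsigned int bio_sectors,
                          unsigned int max_sectors,
                          unsigned int req_segs, unsigned int bio_segs,
                          unsigned int max_segments)
{
        if (req_sectors + bio_sectors > max_sectors)
                return false;                 /* ll_back/front_merge_fn() size gate */
        if (req_segs + bio_segs > max_segments)
                return false;                 /* ll_new_hw_segment() segment gate */
        return true;
}

int main(void)
{
        printf("%d\n", bio_may_merge(2048, 512, 2560, 100, 28, 128));  /* prints 1 */
        printf("%d\n", bio_may_merge(2048, 520, 2560, 100, 28, 128));  /* prints 0: too many sectors */
        return 0;
}
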
548 struct request_queue *q = req->q; in req_no_special_merge() local
550 return !q->mq_ops && req->special; in req_no_special_merge()
553 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
578 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { in ll_merge_requests_fn()
586 if (total_phys_segments > queue_max_segments(q)) in ll_merge_requests_fn()
589 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
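
ll_merge_requests_fn() redoes the segment accounting for a request-with-request merge: the two totals add, and one segment is saved when blk_phys_contig_segment() says the seam collapses into a single segment. A standalone model of that arithmetic (merged_phys_segments() is illustrative):

#include <stdio.h>

/* Model of the segment accounting in ll_merge_requests_fn(): when req and next
 * merge, their totals add, minus one if the last segment of req and the first
 * segment of next collapse. Returns the new total, or 0 if it would exceed
 * max_segments and the merge is refused. */
static unsigned int merged_phys_segments(unsigned int req_segs,
                                         unsigned int next_segs,
                                         int seam_is_contiguous,
                                         unsigned int max_segments)
{
        unsigned int total = req_segs + next_segs;

        if (seam_is_contiguous)
                total--;
        if (total > max_segments)
                return 0;                /* merge refused */
        return total;
}

int main(void)
{
        printf("%u\n", merged_phys_segments(64, 65, 1, 128));   /* prints 128 */
        printf("%u\n", merged_phys_segments(64, 65, 0, 128));   /* prints 0: too many */
        return 0;
}
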
647 static int attempt_merge(struct request_queue *q, struct request *req, in attempt_merge() argument
677 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
707 elv_merge_requests(q, req, next); in attempt_merge()
720 __blk_put_request(q, next); in attempt_merge()
724 int attempt_back_merge(struct request_queue *q, struct request *rq) in attempt_back_merge() argument
726 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
729 return attempt_merge(q, rq, next); in attempt_back_merge()
734 int attempt_front_merge(struct request_queue *q, struct request *rq) in attempt_front_merge() argument
736 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
739 return attempt_merge(q, prev, rq); in attempt_front_merge()
744 int blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
747 return attempt_merge(q, rq, next); in blk_attempt_req_merge()
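
attempt_back_merge() and attempt_front_merge() only differ in which neighbour the elevator hands back (elv_latter_request() vs elv_former_request()); attempt_merge() itself refuses anything that is not exactly adjacent on disk before consulting ll_merge_requests_fn(). A standalone model of that adjacency test (requests_adjacent() is illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Model of the adjacency test in attempt_merge(): 'req' may absorb 'next'
 * only when next starts exactly where req ends. */
static bool requests_adjacent(unsigned long long req_pos, unsigned int req_sectors,
                              unsigned long long next_pos)
{
        return req_pos + req_sectors == next_pos;
}

int main(void)
{
        printf("%d\n", requests_adjacent(1000, 8, 1008));   /* prints 1: back-mergeable */
        printf("%d\n", requests_adjacent(1000, 8, 1016));   /* prints 0: gap between them */
        return 0;
}
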
767 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()