Lines in block/blk-map.c matching references to the identifier q (the struct request_queue argument):
12 static bool iovec_gap_to_prv(struct request_queue *q, in iovec_gap_to_prv() argument
17 if (!queue_virt_boundary(q)) in iovec_gap_to_prv()
26 return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) || in iovec_gap_to_prv()
27 prev_end & queue_virt_boundary(q)); in iovec_gap_to_prv()
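The three matches above are the functional core of the gap check: bail out when the queue sets no virt boundary, then flag a gap if either the current segment's start or the previous segment's end violates the boundary mask. A sketch of how the full function plausibly reads, with the unlisted lines (the unset-prv early return and the prev_end computation) filled in as assumptions from the iovec parameters:

	/*
	 * Sketch reconstructed around the matched lines; the unset-prv
	 * check and the prev_end computation are inferred, not listed above.
	 */
	static bool iovec_gap_to_prv(struct request_queue *q,
				     struct iovec *prv, struct iovec *cur)
	{
		unsigned long prev_end;

		/* No virt boundary mask set: the queue tolerates any gap. */
		if (!queue_virt_boundary(q))
			return false;

		/* Assumed: first iteration, nothing to compare against yet. */
		if (!prv->iov_base && !prv->iov_len)
			return false;

		prev_end = (unsigned long)prv->iov_base + prv->iov_len;

		/* Gap if cur does not start, or prv does not end, on the mask. */
		return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
			(prev_end & queue_virt_boundary(q)));
	}
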
30 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() argument
34 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
35 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
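Lines 34-35 capture the whole decision blk_rq_append_bio() makes: an empty request simply adopts the bio via blk_rq_bio_prep(), while a non-empty one must first pass the queue's back-merge limits. A minimal sketch of that control flow; the tail-chaining branch and the -EINVAL return value are assumptions:

	/* Sketch; only the two q-referencing calls come from the listing. */
	int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			      struct bio *bio)
	{
		if (!rq->bio)
			/* First bio: initialize the request from it. */
			blk_rq_bio_prep(q, rq, bio);
		else if (!ll_back_merge_fn(q, rq, bio))
			/* Appending would exceed the queue's merge limits. */
			return -EINVAL;
		else {
			/* Assumed: chain at the tail and grow the request. */
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
			rq->__data_len += bio->bi_iter.bi_size;
		}
		return 0;
	}
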
81 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() argument
102 if ((uaddr & queue_dma_alignment(q)) || in blk_rq_map_user_iov()
103 iovec_gap_to_prv(q, &prv, &iov)) in blk_rq_map_user_iov()
110 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) in blk_rq_map_user_iov()
111 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov()
113 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov()
136 blk_queue_bounce(q, &bio); in blk_rq_map_user_iov()
138 blk_rq_bio_prep(q, rq, bio); in blk_rq_map_user_iov()
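Lines 102-113 are the heart of the user-mapping policy: every iovec is tested against the queue's DMA alignment mask and for a virt-boundary gap with its predecessor, and a single failure (or a dma_pad_mask mismatch, or an explicit map_data) routes the request through the copy path rather than the zero-copy page pin. Lines 136-138 then bounce and attach the resulting bio. A condensed sketch of that loop as it might sit inside blk_rq_map_user_iov(); the iov_for_each() scaffolding and the prv bookkeeping are assumptions drawn from the iovec_gap_to_prv() signature:

	/* Inside blk_rq_map_user_iov(): q, map_data, iter, gfp_mask in scope. */
	struct iovec iov, prv = { .iov_base = NULL, .iov_len = 0 };
	struct iov_iter i;
	struct bio *bio;
	int unaligned = 0;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long)iov.iov_base;

		/* Keep scanning so every segment gets validated. */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			unaligned = 1;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); /* bounce copy */
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);            /* pin user pages */
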
143 int blk_rq_map_user(struct request_queue *q, struct request *rq, in blk_rq_map_user() argument
154 return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); in blk_rq_map_user()
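blk_rq_map_user() is a thin shim: line 154 shows it deferring straight to blk_rq_map_user_iov(). A sketch assuming it first wraps the single user buffer in a one-element iov_iter via import_single_range(); everything except the listed tail call is an assumption:

	int blk_rq_map_user(struct request_queue *q, struct request *rq,
			    struct rq_map_data *map_data, void __user *ubuf,
			    unsigned long len, gfp_t gfp_mask)
	{
		struct iovec iov;
		struct iov_iter i;
		/* Assumed: build a one-segment iterator over the user buffer. */
		int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

		if (unlikely(ret < 0))
			return ret;

		return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
	}
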
203 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, in blk_rq_map_kern() argument
212 if (len > (queue_max_hw_sectors(q) << 9)) in blk_rq_map_kern()
217 do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); in blk_rq_map_kern()
219 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
221 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
232 ret = blk_rq_append_bio(q, rq, bio); in blk_rq_map_kern()
239 blk_queue_bounce(q, &rq->bio); in blk_rq_map_kern()
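The blk_rq_map_kern() matches trace its whole policy: line 212 rejects buffers larger than the queue's hard sector limit, line 217 forces a copy for any buffer that is misaligned for the queue or lives on the stack (stack pages are unsafe to map for DMA), lines 219-221 pick bio_copy_kern() versus bio_map_kern() accordingly, and lines 232-239 append the bio and apply bouncing. A hypothetical caller, sketched under the assumption of the usual blk_get_request()/blk_execute_rq() flow (the helper name submit_kbuf is invented):

	/* Hypothetical helper: map a kernel buffer into a new request. */
	static int submit_kbuf(struct request_queue *q, void *kbuf,
			       unsigned int len)
	{
		struct request *rq;
		int ret;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
		if (ret)
			goto out;

		/* ... fill in rq->cmd / rq->cmd_len for the driver ... */
		ret = blk_execute_rq(q, NULL, rq, 0);
	out:
		blk_put_request(rq);
		return ret;
	}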