bio                62 arch/m68k/emu/nfblock.c static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
bio                68 arch/m68k/emu/nfblock.c 	sector_t sec = bio->bi_iter.bi_sector;
bio                70 arch/m68k/emu/nfblock.c 	dir = bio_data_dir(bio);
bio                72 arch/m68k/emu/nfblock.c 	bio_for_each_segment(bvec, bio, iter) {
bio                79 arch/m68k/emu/nfblock.c 	bio_endio(bio);
bio               104 arch/xtensa/platforms/iss/simdisk.c static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
bio               109 arch/xtensa/platforms/iss/simdisk.c 	sector_t sector = bio->bi_iter.bi_sector;
bio               111 arch/xtensa/platforms/iss/simdisk.c 	bio_for_each_segment(bvec, bio, iter) {
bio               116 arch/xtensa/platforms/iss/simdisk.c 				bio_data_dir(bio) == WRITE);
bio               121 arch/xtensa/platforms/iss/simdisk.c 	bio_endio(bio);
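Both make_request handlers above follow the same shape: walk the bio's segments, move the data, complete the bio. A condensed sketch of that shape (my_dev, my_dev_transfer() and my_make_request() are hypothetical stand-ins, not kernel APIs):

	static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
	{
		struct my_dev *dev = q->queuedata;	/* hypothetical driver state */
		struct bio_vec bvec;
		struct bvec_iter iter;
		sector_t sector = bio->bi_iter.bi_sector;

		bio_for_each_segment(bvec, bio, iter) {
			char *buf = kmap_atomic(bvec.bv_page) + bvec.bv_offset;

			/* hypothetical copy helper; direction from bio_data_dir() */
			my_dev_transfer(dev, sector, bvec.bv_len >> 9, buf,
					bio_data_dir(bio) == WRITE);
			sector += bvec.bv_len >> 9;
			kunmap_atomic(buf);
		}

		bio_endio(bio);
		return BLK_QC_T_NONE;
	}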
bio               713 block/bfq-cgroup.c void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
bio               720 block/bfq-cgroup.c 	serial_nr = __bio_blkcg(bio)->css.serial_nr;
bio               729 block/bfq-cgroup.c 	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
bio              1392 block/bfq-cgroup.c void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
bio              2116 block/bfq-iosched.c 					  struct bio *bio,
bio              2123 block/bfq-iosched.c 		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
bio              2213 block/bfq-iosched.c static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
bio              2232 block/bfq-iosched.c 		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
bio              2237 block/bfq-iosched.c 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
bio              2247 block/bfq-iosched.c 			     struct bio *bio)
bio              2252 block/bfq-iosched.c 	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
bio              2253 block/bfq-iosched.c 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
bio              2400 block/bfq-iosched.c 		return ((struct bio *)io_struct)->bi_iter.bi_sector;
bio              2814 block/bfq-iosched.c 				struct bio *bio)
bio              2817 block/bfq-iosched.c 	bool is_sync = op_is_sync(bio->bi_opf);
bio              2837 block/bfq-iosched.c 	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
bio              5015 block/bfq-iosched.c 				       struct bio *bio, bool is_sync,
bio              5018 block/bfq-iosched.c static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bio              5036 block/bfq-iosched.c 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bio              5125 block/bfq-iosched.c 				       struct bio *bio, bool is_sync,
bio              5136 block/bfq-iosched.c 	bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
bio              6002 block/bfq-iosched.c 						   struct bio *bio,
bio              6016 block/bfq-iosched.c 	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
bio              6069 block/bfq-iosched.c static void bfq_prepare_request(struct request *rq, struct bio *bio)
bio              6105 block/bfq-iosched.c 	struct bio *bio = rq->bio;
bio              6128 block/bfq-iosched.c 	bfq_check_ioprio_change(bic, bio);
bio              6130 block/bfq-iosched.c 	bfq_bic_update_cgroup(bic, bio);
bio              6132 block/bfq-iosched.c 	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
bio              6148 block/bfq-iosched.c 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
bio               976 block/bfq-iosched.h void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
bio                37 block/bio-integrity.c struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
bio                42 block/bio-integrity.c 	struct bio_set *bs = bio->bi_pool;
bio                72 block/bio-integrity.c 	bip->bip_bio = bio;
bio                73 block/bio-integrity.c 	bio->bi_integrity = bip;
bio                74 block/bio-integrity.c 	bio->bi_opf |= REQ_INTEGRITY;
bio                90 block/bio-integrity.c void bio_integrity_free(struct bio *bio)
bio                92 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio                93 block/bio-integrity.c 	struct bio_set *bs = bio->bi_pool;
bio               107 block/bio-integrity.c 	bio->bi_integrity = NULL;
bio               108 block/bio-integrity.c 	bio->bi_opf &= ~REQ_INTEGRITY;
bio               120 block/bio-integrity.c int bio_integrity_add_page(struct bio *bio, struct page *page,
bio               123 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               134 block/bio-integrity.c 	    bvec_gap_to_prev(bio->bi_disk->queue,
bio               153 block/bio-integrity.c static blk_status_t bio_integrity_process(struct bio *bio,
bio               156 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               160 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               165 block/bio-integrity.c 	iter.disk_name = bio->bi_disk->disk_name;
bio               170 block/bio-integrity.c 	__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
bio               199 block/bio-integrity.c bool bio_integrity_prep(struct bio *bio)
bio               202 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               203 block/bio-integrity.c 	struct request_queue *q = bio->bi_disk->queue;
bio               214 block/bio-integrity.c 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
bio               217 block/bio-integrity.c 	if (!bio_sectors(bio))
bio               221 block/bio-integrity.c 	if (bio_integrity(bio))
bio               224 block/bio-integrity.c 	if (bio_data_dir(bio) == READ) {
bio               233 block/bio-integrity.c 	intervals = bio_integrity_intervals(bi, bio_sectors(bio));
bio               249 block/bio-integrity.c 	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
bio               259 block/bio-integrity.c 	bip_set_seed(bip, bio->bi_iter.bi_sector);
bio               276 block/bio-integrity.c 		ret = bio_integrity_add_page(bio, virt_to_page(buf),
bio               295 block/bio-integrity.c 	if (bio_data_dir(bio) == WRITE) {
bio               296 block/bio-integrity.c 		bio_integrity_process(bio, &bio->bi_iter,
bio               299 block/bio-integrity.c 		bip->bio_iter = bio->bi_iter;
bio               304 block/bio-integrity.c 	bio->bi_status = status;
bio               305 block/bio-integrity.c 	bio_endio(bio);
bio               323 block/bio-integrity.c 	struct bio *bio = bip->bip_bio;
bio               324 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               331 block/bio-integrity.c 	bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
bio               333 block/bio-integrity.c 	bio_integrity_free(bio);
bio               334 block/bio-integrity.c 	bio_endio(bio);
bio               348 block/bio-integrity.c bool __bio_integrity_endio(struct bio *bio)
bio               350 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               351 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               353 block/bio-integrity.c 	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
bio               360 block/bio-integrity.c 	bio_integrity_free(bio);
bio               373 block/bio-integrity.c void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
bio               375 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               376 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               389 block/bio-integrity.c void bio_integrity_trim(struct bio *bio)
bio               391 block/bio-integrity.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               392 block/bio-integrity.c 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bio               394 block/bio-integrity.c 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
bio               406 block/bio-integrity.c int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
bio               414 block/bio-integrity.c 	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
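The allocation, seeding and page-adding calls listed above combine into a short attach sequence, in the same order bio_integrity_prep() uses them. A minimal sketch, assuming the caller supplies the metadata page and length (attach_integrity, meta_page and meta_len are illustrative names only):

	static int attach_integrity(struct bio *bio, struct page *meta_page,
				    unsigned int meta_len)
	{
		struct bio_integrity_payload *bip;

		bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
		if (IS_ERR(bip))
			return PTR_ERR(bip);

		/* seed the reference tag from the bio's starting sector */
		bip_set_seed(bip, bio->bi_iter.bi_sector);

		if (bio_integrity_add_page(bio, meta_page, meta_len, 0) < meta_len)
			return -ENOMEM;
		return 0;
	}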
bio                64 block/bio.c    	unsigned int sz = sizeof(struct bio) + extra_size;
bio               233 block/bio.c    void bio_uninit(struct bio *bio)
bio               235 block/bio.c    	bio_disassociate_blkg(bio);
bio               237 block/bio.c    	if (bio_integrity(bio))
bio               238 block/bio.c    		bio_integrity_free(bio);
bio               242 block/bio.c    static void bio_free(struct bio *bio)
bio               244 block/bio.c    	struct bio_set *bs = bio->bi_pool;
bio               247 block/bio.c    	bio_uninit(bio);
bio               250 block/bio.c    		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
bio               255 block/bio.c    		p = bio;
bio               261 block/bio.c    		kfree(bio);
bio               270 block/bio.c    void bio_init(struct bio *bio, struct bio_vec *table,
bio               273 block/bio.c    	memset(bio, 0, sizeof(*bio));
bio               274 block/bio.c    	atomic_set(&bio->__bi_remaining, 1);
bio               275 block/bio.c    	atomic_set(&bio->__bi_cnt, 1);
bio               277 block/bio.c    	bio->bi_io_vec = table;
bio               278 block/bio.c    	bio->bi_max_vecs = max_vecs;
bio               292 block/bio.c    void bio_reset(struct bio *bio)
bio               294 block/bio.c    	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
bio               296 block/bio.c    	bio_uninit(bio);
bio               298 block/bio.c    	memset(bio, 0, BIO_RESET_BYTES);
bio               299 block/bio.c    	bio->bi_flags = flags;
bio               300 block/bio.c    	atomic_set(&bio->__bi_remaining, 1);
bio               304 block/bio.c    static struct bio *__bio_chain_endio(struct bio *bio)
bio               306 block/bio.c    	struct bio *parent = bio->bi_private;
bio               309 block/bio.c    		parent->bi_status = bio->bi_status;
bio               310 block/bio.c    	bio_put(bio);
bio               314 block/bio.c    static void bio_chain_endio(struct bio *bio)
bio               316 block/bio.c    	bio_endio(__bio_chain_endio(bio));
bio               330 block/bio.c    void bio_chain(struct bio *bio, struct bio *parent)
bio               332 block/bio.c    	BUG_ON(bio->bi_private || bio->bi_end_io);
bio               334 block/bio.c    	bio->bi_private = parent;
bio               335 block/bio.c    	bio->bi_end_io	= bio_chain_endio;
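bio_chain() is how one logical request fans out over several bios while keeping a single completion. A minimal usage sketch (child and parent are caller-owned bios; the child must not have a bi_end_io of its own, per the BUG_ON above):

	bio_chain(child, parent);	/* child's completion folds into parent */
	submit_bio(child);
	submit_bio(parent);		/* parent's bi_end_io runs after both finish */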
bio               343 block/bio.c    	struct bio *bio;
bio               347 block/bio.c    		bio = bio_list_pop(&bs->rescue_list);
bio               350 block/bio.c    		if (!bio)
bio               353 block/bio.c    		generic_make_request(bio);
bio               360 block/bio.c    	struct bio *bio;
bio               378 block/bio.c    	while ((bio = bio_list_pop(&current->bio_list[0])))
bio               379 block/bio.c    		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
bio               383 block/bio.c    	while ((bio = bio_list_pop(&current->bio_list[1])))
bio               384 block/bio.c    		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
bio               429 block/bio.c    struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
bio               436 block/bio.c    	struct bio *bio;
bio               443 block/bio.c    		p = kmalloc(sizeof(struct bio) +
bio               494 block/bio.c    	bio = p + front_pad;
bio               495 block/bio.c    	bio_init(bio, NULL, 0);
bio               510 block/bio.c    		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
bio               512 block/bio.c    		bvl = bio->bi_inline_vecs;
bio               515 block/bio.c    	bio->bi_pool = bs;
bio               516 block/bio.c    	bio->bi_max_vecs = nr_iovecs;
bio               517 block/bio.c    	bio->bi_io_vec = bvl;
bio               518 block/bio.c    	return bio;
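A minimal allocation sketch, assuming my_bio_set was previously set up with bioset_init(); with __GFP_DIRECT_RECLAIM in the mask the backing mempool lets the allocation retry rather than fail:

	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &my_bio_set);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;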
bio               526 block/bio.c    void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
bio               532 block/bio.c    	__bio_for_each_segment(bv, bio, iter, start) {
bio               551 block/bio.c    void bio_truncate(struct bio *bio, unsigned new_size)
bio               558 block/bio.c    	if (new_size >= bio->bi_iter.bi_size)
bio               561 block/bio.c    	if (bio_op(bio) != REQ_OP_READ)
bio               564 block/bio.c    	bio_for_each_segment(bv, bio, iter) {
bio               587 block/bio.c    	bio->bi_iter.bi_size = new_size;
bio               598 block/bio.c    void bio_put(struct bio *bio)
bio               600 block/bio.c    	if (!bio_flagged(bio, BIO_REFFED))
bio               601 block/bio.c    		bio_free(bio);
bio               603 block/bio.c    		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
bio               608 block/bio.c    		if (atomic_dec_and_test(&bio->__bi_cnt))
bio               609 block/bio.c    			bio_free(bio);
bio               625 block/bio.c    void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio               627 block/bio.c    	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
bio               633 block/bio.c    	bio->bi_disk = bio_src->bi_disk;
bio               634 block/bio.c    	bio->bi_partno = bio_src->bi_partno;
bio               635 block/bio.c    	bio_set_flag(bio, BIO_CLONED);
bio               637 block/bio.c    		bio_set_flag(bio, BIO_THROTTLED);
bio               638 block/bio.c    	bio->bi_opf = bio_src->bi_opf;
bio               639 block/bio.c    	bio->bi_ioprio = bio_src->bi_ioprio;
bio               640 block/bio.c    	bio->bi_write_hint = bio_src->bi_write_hint;
bio               641 block/bio.c    	bio->bi_iter = bio_src->bi_iter;
bio               642 block/bio.c    	bio->bi_io_vec = bio_src->bi_io_vec;
bio               644 block/bio.c    	bio_clone_blkg_association(bio, bio_src);
bio               645 block/bio.c    	blkcg_bio_issue_init(bio);
bio               657 block/bio.c    struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
bio               659 block/bio.c    	struct bio *b;
bio               665 block/bio.c    	__bio_clone_fast(b, bio);
bio               667 block/bio.c    	if (bio_integrity(bio)) {
bio               670 block/bio.c    		ret = bio_integrity_clone(b, bio, gfp_mask);
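A stacking-driver style sketch of bio_clone_fast(): the clone shares bi_io_vec with the original, so only routing and completion fields are touched (lower_bdev, my_endio and my_bio_set are hypothetical; error handling omitted):

	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);

	bio_set_dev(clone, lower_bdev);		/* redirect to the lower device */
	clone->bi_end_io = my_endio;
	clone->bi_private = bio;		/* remember the original bio */
	submit_bio(clone);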
bio               701 block/bio.c    static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
bio               705 block/bio.c    	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
bio               714 block/bio.c    	return __bio_try_merge_page(bio, page, len, offset, same_page);
bio               733 block/bio.c    static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
bio               742 block/bio.c    	if (unlikely(bio_flagged(bio, BIO_CLONED)))
bio               745 block/bio.c    	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
bio               748 block/bio.c    	if (bio->bi_vcnt > 0) {
bio               749 block/bio.c    		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
bio               756 block/bio.c    		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
bio               761 block/bio.c    	if (bio_full(bio, len))
bio               764 block/bio.c    	if (bio->bi_vcnt >= queue_max_segments(q))
bio               767 block/bio.c    	bvec = &bio->bi_io_vec[bio->bi_vcnt];
bio               771 block/bio.c    	bio->bi_vcnt++;
bio               772 block/bio.c    	bio->bi_iter.bi_size += len;
bio               776 block/bio.c    int bio_add_pc_page(struct request_queue *q, struct bio *bio,
bio               780 block/bio.c    	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
bio               800 block/bio.c    bool __bio_try_merge_page(struct bio *bio, struct page *page,
bio               803 block/bio.c    	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
bio               806 block/bio.c    	if (bio->bi_vcnt > 0) {
bio               807 block/bio.c    		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
bio               810 block/bio.c    			if (bio->bi_iter.bi_size > UINT_MAX - len)
bio               813 block/bio.c    			bio->bi_iter.bi_size += len;
bio               831 block/bio.c    void __bio_add_page(struct bio *bio, struct page *page,
bio               834 block/bio.c    	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
bio               836 block/bio.c    	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
bio               837 block/bio.c    	WARN_ON_ONCE(bio_full(bio, len));
bio               843 block/bio.c    	bio->bi_iter.bi_size += len;
bio               844 block/bio.c    	bio->bi_vcnt++;
bio               846 block/bio.c    	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
bio               847 block/bio.c    		bio_set_flag(bio, BIO_WORKINGSET);
bio               861 block/bio.c    int bio_add_page(struct bio *bio, struct page *page,
bio               866 block/bio.c    	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
bio               867 block/bio.c    		if (bio_full(bio, len))
bio               869 block/bio.c    		__bio_add_page(bio, page, len, offset);
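Putting bio_add_page() together with submit_bio_wait() from further down this listing gives the usual synchronous I/O helper shape. A self-contained sketch, assuming nr_pages <= BIO_MAX_PAGES (read_pages_sync is an illustrative name):

	static int read_pages_sync(struct block_device *bdev, sector_t sector,
				   struct page **pages, unsigned int nr_pages)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
		unsigned int i;
		int ret;

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_opf = REQ_OP_READ;

		for (i = 0; i < nr_pages; i++)
			if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
				break;	/* bio full; a real caller would chain another bio */

		ret = submit_bio_wait(bio);
		bio_put(bio);
		return ret;
	}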
bio               875 block/bio.c    void bio_release_pages(struct bio *bio, bool mark_dirty)
bio               880 block/bio.c    	if (bio_flagged(bio, BIO_NO_PAGE_REF))
bio               883 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               890 block/bio.c    static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
bio               900 block/bio.c    	size = bio_add_page(bio, bv->bv_page, len,
bio               920 block/bio.c    static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio               922 block/bio.c    	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
bio               923 block/bio.c    	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
bio               924 block/bio.c    	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
bio               948 block/bio.c    		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
bio               952 block/bio.c    			if (WARN_ON_ONCE(bio_full(bio, len)))
bio               954 block/bio.c    			__bio_add_page(bio, page, len, offset);
bio               983 block/bio.c    int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio               988 block/bio.c    	if (WARN_ON_ONCE(bio->bi_vcnt))
bio               993 block/bio.c    			ret = __bio_iov_bvec_add_pages(bio, iter);
bio               995 block/bio.c    			ret = __bio_iov_iter_get_pages(bio, iter);
bio               996 block/bio.c    	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
bio               999 block/bio.c    		bio_set_flag(bio, BIO_NO_PAGE_REF);
bio              1000 block/bio.c    	return bio->bi_vcnt ? 0 : ret;
bio              1003 block/bio.c    static void submit_bio_wait_endio(struct bio *bio)
bio              1005 block/bio.c    	complete(bio->bi_private);
bio              1019 block/bio.c    int submit_bio_wait(struct bio *bio)
bio              1021 block/bio.c    	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
bio              1023 block/bio.c    	bio->bi_private = &done;
bio              1024 block/bio.c    	bio->bi_end_io = submit_bio_wait_endio;
bio              1025 block/bio.c    	bio->bi_opf |= REQ_SYNC;
bio              1026 block/bio.c    	submit_bio(bio);
bio              1029 block/bio.c    	return blk_status_to_errno(bio->bi_status);
bio              1044 block/bio.c    void bio_advance(struct bio *bio, unsigned bytes)
bio              1046 block/bio.c    	if (bio_integrity(bio))
bio              1047 block/bio.c    		bio_integrity_advance(bio, bytes);
bio              1049 block/bio.c    	bio_advance_iter(bio, &bio->bi_iter, bytes);
bio              1053 block/bio.c    void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
bio              1054 block/bio.c    			struct bio *src, struct bvec_iter *src_iter)
bio              1092 block/bio.c    void bio_copy_data(struct bio *dst, struct bio *src)
bio              1111 block/bio.c    void bio_list_copy_data(struct bio *dst, struct bio *src)
bio              1168 block/bio.c    static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
bio              1173 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              1199 block/bio.c    static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
bio              1204 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              1222 block/bio.c    void bio_free_pages(struct bio *bio)
bio              1227 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all)
bio              1239 block/bio.c    int bio_uncopy_user(struct bio *bio)
bio              1241 block/bio.c    	struct bio_map_data *bmd = bio->bi_private;
bio              1244 block/bio.c    	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
bio              1252 block/bio.c    		else if (bio_data_dir(bio) == READ)
bio              1253 block/bio.c    			ret = bio_copy_to_iter(bio, bmd->iter);
bio              1255 block/bio.c    			bio_free_pages(bio);
bio              1258 block/bio.c    	bio_put(bio);
bio              1273 block/bio.c    struct bio *bio_copy_user_iov(struct request_queue *q,
bio              1280 block/bio.c    	struct bio *bio;
bio              1302 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
bio              1303 block/bio.c    	if (!bio)
bio              1338 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
bio              1352 block/bio.c    		map_data->offset += bio->bi_iter.bi_size;
bio              1359 block/bio.c    		ret = bio_copy_from_iter(bio, iter);
bio              1364 block/bio.c    			zero_fill_bio(bio);
bio              1365 block/bio.c    		iov_iter_advance(iter, bio->bi_iter.bi_size);
bio              1368 block/bio.c    	bio->bi_private = bmd;
bio              1370 block/bio.c    		bio_set_flag(bio, BIO_NULL_MAPPED);
bio              1371 block/bio.c    	return bio;
bio              1374 block/bio.c    		bio_free_pages(bio);
bio              1375 block/bio.c    	bio_put(bio);
bio              1390 block/bio.c    struct bio *bio_map_user_iov(struct request_queue *q,
bio              1395 block/bio.c    	struct bio *bio;
bio              1401 block/bio.c    	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
bio              1402 block/bio.c    	if (!bio)
bio              1431 block/bio.c    				if (!__bio_add_pc_page(q, bio, page, n, offs,
bio              1455 block/bio.c    	bio_set_flag(bio, BIO_USER_MAPPED);
bio              1463 block/bio.c    	bio_get(bio);
bio              1464 block/bio.c    	return bio;
bio              1467 block/bio.c    	bio_release_pages(bio, false);
bio              1468 block/bio.c    	bio_put(bio);
bio              1481 block/bio.c    void bio_unmap_user(struct bio *bio)
bio              1483 block/bio.c    	bio_release_pages(bio, bio_data_dir(bio) == READ);
bio              1484 block/bio.c    	bio_put(bio);
bio              1485 block/bio.c    	bio_put(bio);
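The two bio_put() calls in bio_unmap_user() above are intentional, not a typo: the first drops the extra reference taken by the bio_get() at the end of bio_map_user_iov() (line 1463 above, held so the bio survives until unmap time), and the second releases the bio itself.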
bio              1488 block/bio.c    static void bio_invalidate_vmalloc_pages(struct bio *bio)
bio              1491 block/bio.c    	if (bio->bi_private && !op_is_write(bio_op(bio))) {
bio              1494 block/bio.c    		for (i = 0; i < bio->bi_vcnt; i++)
bio              1495 block/bio.c    			len += bio->bi_io_vec[i].bv_len;
bio              1496 block/bio.c    		invalidate_kernel_vmap_range(bio->bi_private, len);
bio              1501 block/bio.c    static void bio_map_kern_endio(struct bio *bio)
bio              1503 block/bio.c    	bio_invalidate_vmalloc_pages(bio);
bio              1504 block/bio.c    	bio_put(bio);
bio              1517 block/bio.c    struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
bio              1527 block/bio.c    	struct bio *bio;
bio              1529 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
bio              1530 block/bio.c    	if (!bio)
bio              1535 block/bio.c    		bio->bi_private = data;
bio              1552 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes,
bio              1555 block/bio.c    			bio_put(bio);
bio              1564 block/bio.c    	bio->bi_end_io = bio_map_kern_endio;
bio              1565 block/bio.c    	return bio;
bio              1568 block/bio.c    static void bio_copy_kern_endio(struct bio *bio)
bio              1570 block/bio.c    	bio_free_pages(bio);
bio              1571 block/bio.c    	bio_put(bio);
bio              1574 block/bio.c    static void bio_copy_kern_endio_read(struct bio *bio)
bio              1576 block/bio.c    	char *p = bio->bi_private;
bio              1580 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              1585 block/bio.c    	bio_copy_kern_endio(bio);
bio              1599 block/bio.c    struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
bio              1605 block/bio.c    	struct bio *bio;
bio              1616 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
bio              1617 block/bio.c    	if (!bio)
bio              1634 block/bio.c    		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
bio              1642 block/bio.c    		bio->bi_end_io = bio_copy_kern_endio_read;
bio              1643 block/bio.c    		bio->bi_private = data;
bio              1645 block/bio.c    		bio->bi_end_io = bio_copy_kern_endio;
bio              1648 block/bio.c    	return bio;
bio              1651 block/bio.c    	bio_free_pages(bio);
bio              1652 block/bio.c    	bio_put(bio);
bio              1685 block/bio.c    void bio_set_pages_dirty(struct bio *bio)
bio              1690 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              1711 block/bio.c    static struct bio *bio_dirty_list;
bio              1718 block/bio.c    	struct bio *bio, *next;
bio              1725 block/bio.c    	while ((bio = next) != NULL) {
bio              1726 block/bio.c    		next = bio->bi_private;
bio              1728 block/bio.c    		bio_release_pages(bio, true);
bio              1729 block/bio.c    		bio_put(bio);
bio              1733 block/bio.c    void bio_check_pages_dirty(struct bio *bio)
bio              1739 block/bio.c    	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              1744 block/bio.c    	bio_release_pages(bio, false);
bio              1745 block/bio.c    	bio_put(bio);
bio              1749 block/bio.c    	bio->bi_private = bio_dirty_list;
bio              1750 block/bio.c    	bio_dirty_list = bio;
bio              1805 block/bio.c    static inline bool bio_remaining_done(struct bio *bio)
bio              1811 block/bio.c    	if (!bio_flagged(bio, BIO_CHAIN))
bio              1814 block/bio.c    	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
bio              1816 block/bio.c    	if (atomic_dec_and_test(&bio->__bi_remaining)) {
bio              1817 block/bio.c    		bio_clear_flag(bio, BIO_CHAIN);
bio              1838 block/bio.c    void bio_endio(struct bio *bio)
bio              1841 block/bio.c    	if (!bio_remaining_done(bio))
bio              1843 block/bio.c    	if (!bio_integrity_endio(bio))
bio              1846 block/bio.c    	if (bio->bi_disk)
bio              1847 block/bio.c    		rq_qos_done_bio(bio->bi_disk->queue, bio);
bio              1857 block/bio.c    	if (bio->bi_end_io == bio_chain_endio) {
bio              1858 block/bio.c    		bio = __bio_chain_endio(bio);
bio              1862 block/bio.c    	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
bio              1863 block/bio.c    		trace_block_bio_complete(bio->bi_disk->queue, bio,
bio              1864 block/bio.c    					 blk_status_to_errno(bio->bi_status));
bio              1865 block/bio.c    		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
bio              1868 block/bio.c    	blk_throtl_bio_endio(bio);
bio              1870 block/bio.c    	bio_uninit(bio);
bio              1871 block/bio.c    	if (bio->bi_end_io)
bio              1872 block/bio.c    		bio->bi_end_io(bio);
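bio_endio() ends in the submitter's bi_end_io callback; a typical one looks like the sketch below (my_end_io and my_ctx are hypothetical; the submitter stored its context in bi_private before submit_bio()):

	static void my_end_io(struct bio *bio)
	{
		struct my_ctx *ctx = bio->bi_private;	/* hypothetical context */

		if (bio->bi_status)
			ctx->error = blk_status_to_errno(bio->bi_status);
		complete(&ctx->done);
		bio_put(bio);
	}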
bio              1890 block/bio.c    struct bio *bio_split(struct bio *bio, int sectors,
bio              1893 block/bio.c    	struct bio *split;
bio              1896 block/bio.c    	BUG_ON(sectors >= bio_sectors(bio));
bio              1898 block/bio.c    	split = bio_clone_fast(bio, gfp, bs);
bio              1907 block/bio.c    	bio_advance(bio, split->bi_iter.bi_size);
bio              1909 block/bio.c    	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio              1922 block/bio.c    void bio_trim(struct bio *bio, int offset, int size)
bio              1929 block/bio.c    	if (offset == 0 && size == bio->bi_iter.bi_size)
bio              1932 block/bio.c    	bio_advance(bio, offset << 9);
bio              1933 block/bio.c    	bio->bi_iter.bi_size = size;
bio              1935 block/bio.c    	if (bio_integrity(bio))
bio              1936 block/bio.c    		bio_integrity_trim(bio);
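bio_split() is normally paired with bio_chain() and a resubmission of the remainder, the same pattern __blk_queue_split() uses later in this listing. A condensed sketch:

	struct bio *split = bio_split(bio, sectors, GFP_NOIO, &q->bio_split);

	if (split) {
		bio_chain(split, bio);		/* remainder waits on the front part */
		generic_make_request(bio);	/* requeue the remainder */
		bio = split;			/* carry on with the first "sectors" */
	}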
bio              2059 block/bio.c    void bio_disassociate_blkg(struct bio *bio)
bio              2061 block/bio.c    	if (bio->bi_blkg) {
bio              2062 block/bio.c    		blkg_put(bio->bi_blkg);
bio              2063 block/bio.c    		bio->bi_blkg = NULL;
bio              2082 block/bio.c    static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
bio              2084 block/bio.c    	bio_disassociate_blkg(bio);
bio              2086 block/bio.c    	bio->bi_blkg = blkg_tryget_closest(blkg);
bio              2098 block/bio.c    void bio_associate_blkg_from_css(struct bio *bio,
bio              2101 block/bio.c    	struct request_queue *q = bio->bi_disk->queue;
bio              2111 block/bio.c    	__bio_associate_blkg(bio, blkg);
bio              2127 block/bio.c    void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
bio              2137 block/bio.c    	bio_associate_blkg_from_css(bio, css);
bio              2152 block/bio.c    void bio_associate_blkg(struct bio *bio)
bio              2158 block/bio.c    	if (bio->bi_blkg)
bio              2159 block/bio.c    		css = &bio_blkcg(bio)->css;
bio              2163 block/bio.c    	bio_associate_blkg_from_css(bio, css);
bio              2174 block/bio.c    void bio_clone_blkg_association(struct bio *dst, struct bio *src)
bio               125 block/blk-cgroup.c 	struct bio *bio;
bio               133 block/blk-cgroup.c 	while ((bio = bio_list_pop(&bios)))
bio               134 block/blk-cgroup.c 		submit_bio(bio);
bio              1614 block/blk-cgroup.c bool __blkcg_punt_bio_submit(struct bio *bio)
bio              1616 block/blk-cgroup.c 	struct blkcg_gq *blkg = bio->bi_blkg;
bio              1619 block/blk-cgroup.c 	bio->bi_opf &= ~REQ_CGROUP_PUNT;
bio              1626 block/blk-cgroup.c 	bio_list_add(&blkg->async_bios, bio);
bio               229 block/blk-core.c static void req_bio_endio(struct request *rq, struct bio *bio,
bio               233 block/blk-core.c 		bio->bi_status = error;
bio               236 block/blk-core.c 		bio_set_flag(bio, BIO_QUIET);
bio               238 block/blk-core.c 	bio_advance(bio, nbytes);
bio               241 block/blk-core.c 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio               242 block/blk-core.c 		bio_endio(bio);
bio               255 block/blk-core.c 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
bio               599 block/blk-core.c bool bio_attempt_back_merge(struct request *req, struct bio *bio,
bio               602 block/blk-core.c 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
bio               604 block/blk-core.c 	if (!ll_back_merge_fn(req, bio, nr_segs))
bio               607 block/blk-core.c 	trace_block_bio_backmerge(req->q, req, bio);
bio               608 block/blk-core.c 	rq_qos_merge(req->q, req, bio);
bio               613 block/blk-core.c 	req->biotail->bi_next = bio;
bio               614 block/blk-core.c 	req->biotail = bio;
bio               615 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
bio               621 block/blk-core.c bool bio_attempt_front_merge(struct request *req, struct bio *bio,
bio               624 block/blk-core.c 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
bio               626 block/blk-core.c 	if (!ll_front_merge_fn(req, bio, nr_segs))
bio               629 block/blk-core.c 	trace_block_bio_frontmerge(req->q, req, bio);
bio               630 block/blk-core.c 	rq_qos_merge(req->q, req, bio);
bio               635 block/blk-core.c 	bio->bi_next = req->bio;
bio               636 block/blk-core.c 	req->bio = bio;
bio               638 block/blk-core.c 	req->__sector = bio->bi_iter.bi_sector;
bio               639 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
bio               646 block/blk-core.c 		struct bio *bio)
bio               652 block/blk-core.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
bio               656 block/blk-core.c 	rq_qos_merge(q, req, bio);
bio               658 block/blk-core.c 	req->biotail->bi_next = bio;
bio               659 block/blk-core.c 	req->biotail = bio;
bio               660 block/blk-core.c 	req->__data_len += bio->bi_iter.bi_size;
bio               692 block/blk-core.c bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
bio               699 block/blk-core.c 	plug = blk_mq_plug(q, bio);
bio               717 block/blk-core.c 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
bio               720 block/blk-core.c 		switch (blk_try_merge(rq, bio)) {
bio               722 block/blk-core.c 			merged = bio_attempt_back_merge(rq, bio, nr_segs);
bio               725 block/blk-core.c 			merged = bio_attempt_front_merge(rq, bio, nr_segs);
bio               728 block/blk-core.c 			merged = bio_attempt_discard_merge(q, rq, bio);
bio               741 block/blk-core.c static void handle_bad_sector(struct bio *bio, sector_t maxsector)
bio               747 block/blk-core.c 			bio_devname(bio, b), bio->bi_opf,
bio               748 block/blk-core.c 			(unsigned long long)bio_end_sector(bio),
bio               787 block/blk-core.c static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
bio               789 block/blk-core.c 	const int op = bio_op(bio);
bio               794 block/blk-core.c 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
bio               800 block/blk-core.c 			bio_devname(bio, b), part->partno);
bio               808 block/blk-core.c static noinline int should_fail_bio(struct bio *bio)
bio               810 block/blk-core.c 	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
bio               821 block/blk-core.c static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
bio               823 block/blk-core.c 	unsigned int nr_sectors = bio_sectors(bio);
bio               827 block/blk-core.c 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
bio               828 block/blk-core.c 		handle_bad_sector(bio, maxsector);
bio               837 block/blk-core.c static inline int blk_partition_remap(struct bio *bio)
bio               843 block/blk-core.c 	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
bio               846 block/blk-core.c 	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
bio               848 block/blk-core.c 	if (unlikely(bio_check_ro(bio, p)))
bio               855 block/blk-core.c 	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
bio               856 block/blk-core.c 		if (bio_check_eod(bio, part_nr_sects_read(p)))
bio               858 block/blk-core.c 		bio->bi_iter.bi_sector += p->start_sect;
bio               859 block/blk-core.c 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
bio               860 block/blk-core.c 				      bio->bi_iter.bi_sector - p->start_sect);
bio               862 block/blk-core.c 	bio->bi_partno = 0;
bio               870 block/blk-core.c generic_make_request_checks(struct bio *bio)
bio               873 block/blk-core.c 	int nr_sectors = bio_sectors(bio);
bio               879 block/blk-core.c 	q = bio->bi_disk->queue;
bio               884 block/blk-core.c 			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
bio               892 block/blk-core.c 	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
bio               895 block/blk-core.c 	if (should_fail_bio(bio))
bio               898 block/blk-core.c 	if (bio->bi_partno) {
bio               899 block/blk-core.c 		if (unlikely(blk_partition_remap(bio)))
bio               902 block/blk-core.c 		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
bio               904 block/blk-core.c 		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
bio               913 block/blk-core.c 	if (op_is_flush(bio->bi_opf) &&
bio               915 block/blk-core.c 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
bio               923 block/blk-core.c 		bio->bi_opf &= ~REQ_HIPRI;
bio               925 block/blk-core.c 	switch (bio_op(bio)) {
bio               962 block/blk-core.c 	if (!blkcg_bio_issue_check(q, bio))
bio               965 block/blk-core.c 	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
bio               966 block/blk-core.c 		trace_block_bio_queue(q, bio);
bio               970 block/blk-core.c 		bio_set_flag(bio, BIO_TRACE_COMPLETION);
bio               977 block/blk-core.c 	bio->bi_status = status;
bio               978 block/blk-core.c 	bio_endio(bio);
bio              1006 block/blk-core.c blk_qc_t generic_make_request(struct bio *bio)
bio              1018 block/blk-core.c 	if (!generic_make_request_checks(bio))
bio              1032 block/blk-core.c 		bio_list_add(&current->bio_list[0], bio);
bio              1050 block/blk-core.c 	BUG_ON(bio->bi_next);
bio              1054 block/blk-core.c 		struct request_queue *q = bio->bi_disk->queue;
bio              1055 block/blk-core.c 		blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
bio              1064 block/blk-core.c 			ret = q->make_request_fn(q, bio);
bio              1073 block/blk-core.c 			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
bio              1074 block/blk-core.c 				if (q == bio->bi_disk->queue)
bio              1075 block/blk-core.c 					bio_list_add(&same, bio);
bio              1077 block/blk-core.c 					bio_list_add(&lower, bio);
bio              1084 block/blk-core.c 					(bio->bi_opf & REQ_NOWAIT)))
bio              1085 block/blk-core.c 				bio_wouldblock_error(bio);
bio              1087 block/blk-core.c 				bio_io_error(bio);
bio              1089 block/blk-core.c 		bio = bio_list_pop(&bio_list_on_stack[0]);
bio              1090 block/blk-core.c 	} while (bio);
bio              1108 block/blk-core.c blk_qc_t direct_make_request(struct bio *bio)
bio              1110 block/blk-core.c 	struct request_queue *q = bio->bi_disk->queue;
bio              1111 block/blk-core.c 	bool nowait = bio->bi_opf & REQ_NOWAIT;
bio              1114 block/blk-core.c 	if (!generic_make_request_checks(bio))
bio              1119 block/blk-core.c 			bio->bi_status = BLK_STS_AGAIN;
bio              1121 block/blk-core.c 			bio->bi_status = BLK_STS_IOERR;
bio              1122 block/blk-core.c 		bio_endio(bio);
bio              1126 block/blk-core.c 	ret = q->make_request_fn(q, bio);
bio              1141 block/blk-core.c blk_qc_t submit_bio(struct bio *bio)
bio              1147 block/blk-core.c 	if (blkcg_punt_bio_submit(bio))
bio              1154 block/blk-core.c 	if (bio_has_data(bio)) {
bio              1157 block/blk-core.c 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
bio              1158 block/blk-core.c 			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
bio              1160 block/blk-core.c 			count = bio_sectors(bio);
bio              1162 block/blk-core.c 		if (op_is_write(bio_op(bio))) {
bio              1165 block/blk-core.c 			if (bio_flagged(bio, BIO_WORKINGSET))
bio              1167 block/blk-core.c 			task_io_account_read(bio->bi_iter.bi_size);
bio              1175 block/blk-core.c 				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
bio              1176 block/blk-core.c 				(unsigned long long)bio->bi_iter.bi_sector,
bio              1177 block/blk-core.c 				bio_devname(bio, b), count);
bio              1190 block/blk-core.c 	ret = generic_make_request(bio);
bio              1285 block/blk-core.c 	struct bio *bio;
bio              1297 block/blk-core.c 	for (bio = rq->bio; bio; bio = bio->bi_next) {
bio              1298 block/blk-core.c 		if ((bio->bi_opf & ff) != ff)
bio              1300 block/blk-core.c 		bytes += bio->bi_iter.bi_size;
bio              1389 block/blk-core.c 	if (rq->bio) {
bio              1391 block/blk-core.c 			list->tail->bi_next = rq->bio;
bio              1393 block/blk-core.c 			list->head = rq->bio;
bio              1396 block/blk-core.c 		rq->bio = NULL;
bio              1437 block/blk-core.c 	if (!req->bio)
bio              1453 block/blk-core.c 	while (req->bio) {
bio              1454 block/blk-core.c 		struct bio *bio = req->bio;
bio              1455 block/blk-core.c 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
bio              1457 block/blk-core.c 		if (bio_bytes == bio->bi_iter.bi_size)
bio              1458 block/blk-core.c 			req->bio = bio->bi_next;
bio              1461 block/blk-core.c 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
bio              1462 block/blk-core.c 		req_bio_endio(req, bio, bio_bytes, error);
bio              1474 block/blk-core.c 	if (!req->bio) {
bio              1493 block/blk-core.c 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
bio              1570 block/blk-core.c 	struct bio *bio;
bio              1572 block/blk-core.c 	while ((bio = rq->bio) != NULL) {
bio              1573 block/blk-core.c 		rq->bio = bio->bi_next;
bio              1575 block/blk-core.c 		bio_put(bio);
bio              1618 block/blk-core.c 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
bio              1621 block/blk-core.c 	struct bio *bio, *bio_src;
bio              1627 block/blk-core.c 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
bio              1628 block/blk-core.c 		if (!bio)
bio              1631 block/blk-core.c 		if (bio_ctr && bio_ctr(bio, bio_src, data))
bio              1634 block/blk-core.c 		if (rq->bio) {
bio              1635 block/blk-core.c 			rq->biotail->bi_next = bio;
bio              1636 block/blk-core.c 			rq->biotail = bio;
bio              1638 block/blk-core.c 			rq->bio = rq->biotail = bio;
bio              1646 block/blk-core.c 	if (bio)
bio              1647 block/blk-core.c 		bio_put(bio);
bio              1795 block/blk-core.c 			FIELD_SIZEOF(struct bio, bi_opf));
bio               128 block/blk-flush.c 	rq->bio = rq->biotail;
bio               393 block/blk-flush.c 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
bio               437 block/blk-flush.c 	struct bio *bio;
bio               456 block/blk-flush.c 	bio = bio_alloc(gfp_mask, 0);
bio               457 block/blk-flush.c 	bio_set_dev(bio, bdev);
bio               458 block/blk-flush.c 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio               460 block/blk-flush.c 	ret = submit_bio_wait(bio);
bio               468 block/blk-flush.c 		*error_sector = bio->bi_iter.bi_sector;
bio               470 block/blk-flush.c 	bio_put(bio);
bio                27 block/blk-integrity.c int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
bio                35 block/blk-integrity.c 	bio_for_each_integrity_vec(iv, bio, iter) {
bio                68 block/blk-integrity.c int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
bio                77 block/blk-integrity.c 	bio_for_each_integrity_vec(iv, bio, iter) {
bio               173 block/blk-integrity.c 	if (bio_integrity(req->bio)->bip_flags !=
bio               174 block/blk-integrity.c 	    bio_integrity(next->bio)->bip_flags)
bio               181 block/blk-integrity.c 	if (integrity_req_gap_back_merge(req, next->bio))
bio               189 block/blk-integrity.c 			     struct bio *bio)
bio               192 block/blk-integrity.c 	struct bio *next = bio->bi_next;
bio               194 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
bio               197 block/blk-integrity.c 	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
bio               200 block/blk-integrity.c 	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
bio               203 block/blk-integrity.c 	bio->bi_next = NULL;
bio               204 block/blk-integrity.c 	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
bio               205 block/blk-integrity.c 	bio->bi_next = next;
bio               518 block/blk-iocost.c 	struct bio			*bio;
bio               672 block/blk-iocost.c static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
bio               674 block/blk-iocost.c 	bio->bi_iocost_cost = cost;
bio              1128 block/blk-iocost.c 	iocg_commit_bio(ctx->iocg, wait->bio, cost);
bio              1635 block/blk-iocost.c static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
bio              1640 block/blk-iocost.c 	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
bio              1644 block/blk-iocost.c 	switch (bio_op(bio)) {
bio              1660 block/blk-iocost.c 		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
bio              1676 block/blk-iocost.c static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
bio              1680 block/blk-iocost.c 	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
bio              1684 block/blk-iocost.c static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
bio              1686 block/blk-iocost.c 	struct blkcg_gq *blkg = bio->bi_blkg;
bio              1703 block/blk-iocost.c 	abs_cost = calc_vtime_cost(bio, iocg, false);
bio              1707 block/blk-iocost.c 	iocg->cursor = bio_end_sector(bio);
bio              1731 block/blk-iocost.c 		iocg_commit_bio(iocg, bio, cost);
bio              1745 block/blk-iocost.c 		iocg_commit_bio(iocg, bio, cost);
bio              1766 block/blk-iocost.c 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
bio              1770 block/blk-iocost.c 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
bio              1790 block/blk-iocost.c 	wait.bio = bio;
bio              1811 block/blk-iocost.c 			   struct bio *bio)
bio              1813 block/blk-iocost.c 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
bio              1815 block/blk-iocost.c 	sector_t bio_end = bio_end_sector(bio);
bio              1825 block/blk-iocost.c 	abs_cost = calc_vtime_cost(bio, iocg, true);
bio              1842 block/blk-iocost.c 	if (rq->bio && rq->bio->bi_iocost_cost &&
bio              1844 block/blk-iocost.c 		iocg_commit_bio(iocg, bio, cost);
bio              1858 block/blk-iocost.c 		iocg_commit_bio(iocg, bio, cost);
bio              1863 block/blk-iocost.c static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
bio              1865 block/blk-iocost.c 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
bio              1867 block/blk-iocost.c 	if (iocg && bio->bi_iocost_cost)
bio              1868 block/blk-iocost.c 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
bio               460 block/blk-iolatency.c static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
bio               463 block/blk-iolatency.c 	struct blkcg_gq *blkg = bio->bi_blkg;
bio               464 block/blk-iolatency.c 	bool issue_as_root = bio_issue_as_root_blkg(bio);
bio               478 block/blk-iolatency.c 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
bio               588 block/blk-iolatency.c static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
bio               595 block/blk-iolatency.c 	bool issue_as_root = bio_issue_as_root_blkg(bio);
bio               599 block/blk-iolatency.c 	blkg = bio->bi_blkg;
bio               600 block/blk-iolatency.c 	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
bio               603 block/blk-iolatency.c 	iolat = blkg_to_lat(bio->bi_blkg);
bio               625 block/blk-iolatency.c 		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
bio               626 block/blk-iolatency.c 			iolatency_record_time(iolat, &bio->bi_issue, now,
bio                13 block/blk-lib.c struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
bio                15 block/blk-lib.c 	struct bio *new = bio_alloc(gfp, nr_pages);
bio                17 block/blk-lib.c 	if (bio) {
bio                18 block/blk-lib.c 		bio_chain(bio, new);
bio                19 block/blk-lib.c 		submit_bio(bio);
bio                27 block/blk-lib.c 		struct bio **biop)
bio                30 block/blk-lib.c 	struct bio *bio = *biop;
bio                63 block/blk-lib.c 		bio = blk_next_bio(bio, 0, gfp_mask);
bio                64 block/blk-lib.c 		bio->bi_iter.bi_sector = sector;
bio                65 block/blk-lib.c 		bio_set_dev(bio, bdev);
bio                66 block/blk-lib.c 		bio_set_op_attrs(bio, op, 0);
bio                68 block/blk-lib.c 		bio->bi_iter.bi_size = req_sects << 9;
bio                81 block/blk-lib.c 	*biop = bio;
bio               100 block/blk-lib.c 	struct bio *bio = NULL;
bio               106 block/blk-lib.c 			&bio);
bio               107 block/blk-lib.c 	if (!ret && bio) {
bio               108 block/blk-lib.c 		ret = submit_bio_wait(bio);
bio               111 block/blk-lib.c 		bio_put(bio);
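The blk_next_bio() pattern above (chain each new bio to its predecessor, wait on the last) collapses, for a single small range, into the sketch below; splitting against the queue's discard limits and all error handling are omitted (discard_range_sync is an illustrative name):

	static int discard_range_sync(struct block_device *bdev, sector_t sector,
				      sector_t nr_sects)
	{
		struct bio *bio = blk_next_bio(NULL, 0, GFP_KERNEL);
		int ret;

		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = nr_sects << 9;	/* discards carry no data pages */
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_DISCARD;

		ret = submit_bio_wait(bio);
		bio_put(bio);
		return ret;
	}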
bio               133 block/blk-lib.c 		struct bio **biop)
bio               137 block/blk-lib.c 	struct bio *bio = *biop;
bio               157 block/blk-lib.c 		bio = blk_next_bio(bio, 1, gfp_mask);
bio               158 block/blk-lib.c 		bio->bi_iter.bi_sector = sector;
bio               159 block/blk-lib.c 		bio_set_dev(bio, bdev);
bio               160 block/blk-lib.c 		bio->bi_vcnt = 1;
bio               161 block/blk-lib.c 		bio->bi_io_vec->bv_page = page;
bio               162 block/blk-lib.c 		bio->bi_io_vec->bv_offset = 0;
bio               163 block/blk-lib.c 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
bio               164 block/blk-lib.c 		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
bio               167 block/blk-lib.c 			bio->bi_iter.bi_size = max_write_same_sectors << 9;
bio               171 block/blk-lib.c 			bio->bi_iter.bi_size = nr_sects << 9;
bio               177 block/blk-lib.c 	*biop = bio;
bio               196 block/blk-lib.c 	struct bio *bio = NULL;
bio               202 block/blk-lib.c 			&bio);
bio               203 block/blk-lib.c 	if (ret == 0 && bio) {
bio               204 block/blk-lib.c 		ret = submit_bio_wait(bio);
bio               205 block/blk-lib.c 		bio_put(bio);
bio               214 block/blk-lib.c 		struct bio **biop, unsigned flags)
bio               216 block/blk-lib.c 	struct bio *bio = *biop;
bio               233 block/blk-lib.c 		bio = blk_next_bio(bio, 0, gfp_mask);
bio               234 block/blk-lib.c 		bio->bi_iter.bi_sector = sector;
bio               235 block/blk-lib.c 		bio_set_dev(bio, bdev);
bio               236 block/blk-lib.c 		bio->bi_opf = REQ_OP_WRITE_ZEROES;
bio               238 block/blk-lib.c 			bio->bi_opf |= REQ_NOUNMAP;
bio               241 block/blk-lib.c 			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
bio               245 block/blk-lib.c 			bio->bi_iter.bi_size = nr_sects << 9;
bio               251 block/blk-lib.c 	*biop = bio;
bio               270 block/blk-lib.c 		struct bio **biop)
bio               273 block/blk-lib.c 	struct bio *bio = *biop;
bio               284 block/blk-lib.c 		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
bio               286 block/blk-lib.c 		bio->bi_iter.bi_sector = sector;
bio               287 block/blk-lib.c 		bio_set_dev(bio, bdev);
bio               288 block/blk-lib.c 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               292 block/blk-lib.c 			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
bio               301 block/blk-lib.c 	*biop = bio;
bio               325 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
bio               363 block/blk-lib.c 	struct bio *bio;
bio               372 block/blk-lib.c 	bio = NULL;
bio               376 block/blk-lib.c 						  gfp_mask, &bio, flags);
bio               379 block/blk-lib.c 						gfp_mask, &bio);
bio               384 block/blk-lib.c 	if (ret == 0 && bio) {
bio               385 block/blk-lib.c 		ret = submit_bio_wait(bio);
bio               386 block/blk-lib.c 		bio_put(bio);
bio                18 block/blk-map.c int blk_rq_append_bio(struct request *rq, struct bio **bio)
bio                20 block/blk-map.c 	struct bio *orig_bio = *bio;
bio                25 block/blk-map.c 	blk_queue_bounce(rq->q, bio);
bio                27 block/blk-map.c 	bio_for_each_bvec(bv, *bio, iter)
bio                30 block/blk-map.c 	if (!rq->bio) {
bio                31 block/blk-map.c 		blk_rq_bio_prep(rq, *bio, nr_segs);
bio                33 block/blk-map.c 		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
bio                34 block/blk-map.c 			if (orig_bio != *bio) {
bio                35 block/blk-map.c 				bio_put(*bio);
bio                36 block/blk-map.c 				*bio = orig_bio;
bio                41 block/blk-map.c 		rq->biotail->bi_next = *bio;
bio                42 block/blk-map.c 		rq->biotail = *bio;
bio                43 block/blk-map.c 		rq->__data_len += (*bio)->bi_iter.bi_size;
bio                50 block/blk-map.c static int __blk_rq_unmap_user(struct bio *bio)
bio                54 block/blk-map.c 	if (bio) {
bio                55 block/blk-map.c 		if (bio_flagged(bio, BIO_USER_MAPPED))
bio                56 block/blk-map.c 			bio_unmap_user(bio);
bio                58 block/blk-map.c 			ret = bio_uncopy_user(bio);
bio                69 block/blk-map.c 	struct bio *bio, *orig_bio;
bio                73 block/blk-map.c 		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
bio                75 block/blk-map.c 		bio = bio_map_user_iov(q, iter, gfp_mask);
bio                77 block/blk-map.c 	if (IS_ERR(bio))
bio                78 block/blk-map.c 		return PTR_ERR(bio);
bio                80 block/blk-map.c 	bio->bi_opf &= ~REQ_OP_MASK;
bio                81 block/blk-map.c 	bio->bi_opf |= req_op(rq);
bio                83 block/blk-map.c 	orig_bio = bio;
bio                89 block/blk-map.c 	ret = blk_rq_append_bio(rq, &bio);
bio                94 block/blk-map.c 	bio_get(bio);
bio               126 block/blk-map.c 	struct bio *bio = NULL;
bio               145 block/blk-map.c 		if (!bio)
bio               146 block/blk-map.c 			bio = rq->bio;
bio               149 block/blk-map.c 	if (!bio_flagged(bio, BIO_USER_MAPPED))
bio               154 block/blk-map.c 	blk_rq_unmap_user(bio);
bio               156 block/blk-map.c 	rq->bio = NULL;
bio               185 block/blk-map.c int blk_rq_unmap_user(struct bio *bio)
bio               187 block/blk-map.c 	struct bio *mapped_bio;
bio               190 block/blk-map.c 	while (bio) {
bio               191 block/blk-map.c 		mapped_bio = bio;
bio               192 block/blk-map.c 		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
bio               193 block/blk-map.c 			mapped_bio = bio->bi_private;
bio               199 block/blk-map.c 		mapped_bio = bio;
bio               200 block/blk-map.c 		bio = bio->bi_next;
bio               227 block/blk-map.c 	struct bio *bio, *orig_bio;
bio               237 block/blk-map.c 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
bio               239 block/blk-map.c 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
bio               241 block/blk-map.c 	if (IS_ERR(bio))
bio               242 block/blk-map.c 		return PTR_ERR(bio);
bio               244 block/blk-map.c 	bio->bi_opf &= ~REQ_OP_MASK;
bio               245 block/blk-map.c 	bio->bi_opf |= req_op(rq);
bio               250 block/blk-map.c 	orig_bio = bio;
bio               251 block/blk-map.c 	ret = blk_rq_append_bio(rq, &bio);
bio                16 block/blk-merge.c 		struct request *prev_rq, struct bio *prev, struct bio *next)
bio                29 block/blk-merge.c 		bio_get_first_bvec(prev_rq->bio, &pb);
bio                51 block/blk-merge.c static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
bio                53 block/blk-merge.c 	return bio_will_gap(req->q, req, req->biotail, bio);
bio                56 block/blk-merge.c static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
bio                58 block/blk-merge.c 	return bio_will_gap(req->q, NULL, bio, req->bio);
bio                61 block/blk-merge.c static struct bio *blk_bio_discard_split(struct request_queue *q,
bio                62 block/blk-merge.c 					 struct bio *bio,
bio                85 block/blk-merge.c 	if (bio_sectors(bio) <= max_discard_sectors)
bio                96 block/blk-merge.c 	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
bio               102 block/blk-merge.c 	return bio_split(bio, split_sectors, GFP_NOIO, bs);
bio               105 block/blk-merge.c static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
bio               106 block/blk-merge.c 		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
bio               113 block/blk-merge.c 	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
bio               116 block/blk-merge.c 	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
bio               119 block/blk-merge.c static struct bio *blk_bio_write_same_split(struct request_queue *q,
bio               120 block/blk-merge.c 					    struct bio *bio,
bio               129 block/blk-merge.c 	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
bio               132 block/blk-merge.c 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
bio               144 block/blk-merge.c 				       struct bio *bio)
bio               146 block/blk-merge.c 	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
bio               150 block/blk-merge.c 	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
bio               240 block/blk-merge.c static struct bio *blk_bio_segment_split(struct request_queue *q,
bio               241 block/blk-merge.c 					 struct bio *bio,
bio               248 block/blk-merge.c 	const unsigned max_sectors = get_max_io_size(q, bio);
bio               251 block/blk-merge.c 	bio_for_each_bvec(bv, bio, iter) {
bio               277 block/blk-merge.c 	return bio_split(bio, sectors, GFP_NOIO, bs);
bio               293 block/blk-merge.c void __blk_queue_split(struct request_queue *q, struct bio **bio,
bio               296 block/blk-merge.c 	struct bio *split;
bio               298 block/blk-merge.c 	switch (bio_op(*bio)) {
bio               301 block/blk-merge.c 		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
bio               304 block/blk-merge.c 		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
bio               308 block/blk-merge.c 		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
bio               312 block/blk-merge.c 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
bio               328 block/blk-merge.c 		bio_set_flag(*bio, BIO_QUEUE_ENTERED);
bio               330 block/blk-merge.c 		bio_chain(split, *bio);
bio               331 block/blk-merge.c 		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
bio               332 block/blk-merge.c 		generic_make_request(*bio);
bio               333 block/blk-merge.c 		*bio = split;
bio               348 block/blk-merge.c void blk_queue_split(struct request_queue *q, struct bio **bio)
bio               352 block/blk-merge.c 	__blk_queue_split(q, bio, &nr_segs);
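
__blk_queue_split() above implements a carve/chain/resubmit idiom: split off the largest prefix the queue limits allow, chain the remainder to it, and feed the remainder back through generic_make_request(). A simplified sketch of the same idiom, assuming a caller-owned bio_set (it omits the BIO_QUEUE_ENTERED bookkeeping visible above):

    /*
     * Sketch of the carve/chain/resubmit idiom in __blk_queue_split():
     * split off what fits, chain the remainder so completions stay
     * ordered, resubmit the remainder, and hand the caller the head.
     */
    static void example_split(struct bio **bio, unsigned int max_sectors,
                              struct bio_set *bs)
    {
            if (bio_sectors(*bio) > max_sectors) {
                    struct bio *split;

                    split = bio_split(*bio, max_sectors, GFP_NOIO, bs);
                    bio_chain(split, *bio);         /* *bio is now the tail */
                    generic_make_request(*bio);     /* requeue the tail */
                    *bio = split;                   /* caller keeps the head */
            }
    }
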
bio               363 block/blk-merge.c 	if (!rq->bio)
bio               366 block/blk-merge.c 	switch (bio_op(rq->bio)) {
bio               462 block/blk-merge.c static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
bio               471 block/blk-merge.c 	for_each_bio(bio) {
bio               472 block/blk-merge.c 		bio_for_each_bvec(bvec, bio, iter) {
bio               489 block/blk-merge.c 		if (likely(bio->bi_iter.bi_size)) {
bio               510 block/blk-merge.c 	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
bio               511 block/blk-merge.c 		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
bio               512 block/blk-merge.c 	else if (rq->bio)
bio               513 block/blk-merge.c 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
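
__blk_bios_map_sg() above is the engine under blk_rq_map_sg(), which is what drivers actually call when setting up DMA. A sketch of the driver side, assuming a scatterlist already sized via blk_rq_nr_phys_segments():

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* Sketch: fill a driver's scatterlist from a request. */
    static int example_map_sg(struct request *rq, struct scatterlist *sgl)
    {
            int nsegs = blk_rq_map_sg(rq->q, rq, sgl);

            /* nsegs never exceeds blk_rq_nr_phys_segments(rq);
             * hand sgl/nsegs to the DMA engine from here. */
            return nsegs;
    }
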
bio               551 block/blk-merge.c static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
bio               557 block/blk-merge.c 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
bio               572 block/blk-merge.c int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
bio               574 block/blk-merge.c 	if (req_gap_back_merge(req, bio))
bio               577 block/blk-merge.c 	    integrity_req_gap_back_merge(req, bio))
bio               579 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
bio               585 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
bio               588 block/blk-merge.c int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
bio               590 block/blk-merge.c 	if (req_gap_front_merge(req, bio))
bio               593 block/blk-merge.c 	    integrity_req_gap_front_merge(req, bio))
bio               595 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(bio) >
bio               596 block/blk-merge.c 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
bio               601 block/blk-merge.c 	return ll_new_hw_segment(req, bio, nr_segs);
bio               611 block/blk-merge.c 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
bio               627 block/blk-merge.c 	if (req_gap_back_merge(req, next->bio))
bio               661 block/blk-merge.c 	struct bio *bio;
bio               671 block/blk-merge.c 	for (bio = rq->bio; bio; bio = bio->bi_next) {
bio               672 block/blk-merge.c 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
bio               673 block/blk-merge.c 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
bio               674 block/blk-merge.c 		bio->bi_opf |= ff;
bio               738 block/blk-merge.c 	    !blk_write_same_mergeable(req->bio, next->bio))
bio               793 block/blk-merge.c 	req->biotail->bi_next = next->bio;
bio               810 block/blk-merge.c 	next->bio = NULL;
bio               848 block/blk-merge.c bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
bio               850 block/blk-merge.c 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
bio               853 block/blk-merge.c 	if (req_op(rq) != bio_op(bio))
bio               857 block/blk-merge.c 	if (bio_data_dir(bio) != rq_data_dir(rq))
bio               861 block/blk-merge.c 	if (rq->rq_disk != bio->bi_disk)
bio               865 block/blk-merge.c 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
bio               870 block/blk-merge.c 	    !blk_write_same_mergeable(rq->bio, bio))
bio               877 block/blk-merge.c 	if (rq->write_hint != bio->bi_write_hint)
bio               880 block/blk-merge.c 	if (rq->ioprio != bio_prio(bio))
bio               886 block/blk-merge.c enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
bio               890 block/blk-merge.c 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
bio               892 block/blk-merge.c 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
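
blk_try_merge() above reduces to a pair of positional tests once the discard special case is out of the way. Restated standalone for clarity (a sketch, not the full function):

    /* The positional tests, standalone (discard special case omitted). */
    static enum elv_merge example_try_merge(struct request *rq, struct bio *bio)
    {
            if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                    return ELEVATOR_BACK_MERGE;     /* bio starts where rq ends */
            if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                    return ELEVATOR_FRONT_MERGE;    /* bio ends where rq starts */
            return ELEVATOR_NO_MERGE;
    }
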
bio               226 block/blk-mq-sched.c bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
bio               231 block/blk-mq-sched.c 	switch (elv_merge(q, &rq, bio)) {
bio               233 block/blk-mq-sched.c 		if (!blk_mq_sched_allow_merge(q, rq, bio))
bio               235 block/blk-mq-sched.c 		if (!bio_attempt_back_merge(rq, bio, nr_segs))
bio               242 block/blk-mq-sched.c 		if (!blk_mq_sched_allow_merge(q, rq, bio))
bio               244 block/blk-mq-sched.c 		if (!bio_attempt_front_merge(rq, bio, nr_segs))
bio               251 block/blk-mq-sched.c 		return bio_attempt_discard_merge(q, rq, bio);
bio               263 block/blk-mq-sched.c 			   struct bio *bio, unsigned int nr_segs)
bio               274 block/blk-mq-sched.c 		if (!blk_rq_merge_ok(rq, bio))
bio               277 block/blk-mq-sched.c 		switch (blk_try_merge(rq, bio)) {
bio               279 block/blk-mq-sched.c 			if (blk_mq_sched_allow_merge(q, rq, bio))
bio               280 block/blk-mq-sched.c 				merged = bio_attempt_back_merge(rq, bio,
bio               284 block/blk-mq-sched.c 			if (blk_mq_sched_allow_merge(q, rq, bio))
bio               285 block/blk-mq-sched.c 				merged = bio_attempt_front_merge(rq, bio,
bio               289 block/blk-mq-sched.c 			merged = bio_attempt_discard_merge(q, rq, bio);
bio               309 block/blk-mq-sched.c 				 struct blk_mq_ctx *ctx, struct bio *bio,
bio               316 block/blk-mq-sched.c 	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
bio               324 block/blk-mq-sched.c bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
bio               329 block/blk-mq-sched.c 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
bio               334 block/blk-mq-sched.c 		return e->type->ops.bio_merge(hctx, bio, nr_segs);
bio               341 block/blk-mq-sched.c 		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
bio                14 block/blk-mq-sched.h bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
bio                16 block/blk-mq-sched.h bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
bio                35 block/blk-mq-sched.h blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
bio                38 block/blk-mq-sched.h 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
bio                41 block/blk-mq-sched.h 	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
bio                46 block/blk-mq-sched.h 			 struct bio *bio)
bio                51 block/blk-mq-sched.h 		return e->type->ops.allow_merge(q, rq, bio);
bio               357 block/blk-mq.c 					  struct bio *bio,
bio               414 block/blk-mq.c 			e->type->ops.prepare_request(rq, bio);
bio               441 block/blk-mq.c 	rq->bio = rq->biotail = NULL;
bio              1783 block/blk-mq.c static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
bio              1786 block/blk-mq.c 	if (bio->bi_opf & REQ_RAHEAD)
bio              1789 block/blk-mq.c 	rq->__sector = bio->bi_iter.bi_sector;
bio              1790 block/blk-mq.c 	rq->write_hint = bio->bi_write_hint;
bio              1791 block/blk-mq.c 	blk_rq_bio_prep(rq, bio, nr_segs);
bio              1953 block/blk-mq.c static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
bio              1955 block/blk-mq.c 	const int is_sync = op_is_sync(bio->bi_opf);
bio              1956 block/blk-mq.c 	const int is_flush_fua = op_is_flush(bio->bi_opf);
bio              1964 block/blk-mq.c 	blk_queue_bounce(q, &bio);
bio              1965 block/blk-mq.c 	__blk_queue_split(q, &bio, &nr_segs);
bio              1967 block/blk-mq.c 	if (!bio_integrity_prep(bio))
bio              1971 block/blk-mq.c 	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
bio              1974 block/blk-mq.c 	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
bio              1977 block/blk-mq.c 	rq_qos_throttle(q, bio);
bio              1979 block/blk-mq.c 	data.cmd_flags = bio->bi_opf;
bio              1980 block/blk-mq.c 	rq = blk_mq_get_request(q, bio, &data);
bio              1982 block/blk-mq.c 		rq_qos_cleanup(q, bio);
bio              1983 block/blk-mq.c 		if (bio->bi_opf & REQ_NOWAIT)
bio              1984 block/blk-mq.c 			bio_wouldblock_error(bio);
bio              1988 block/blk-mq.c 	trace_block_getrq(q, bio, bio->bi_opf);
bio              1990 block/blk-mq.c 	rq_qos_track(q, rq, bio);
bio              1994 block/blk-mq.c 	blk_mq_bio_to_request(rq, bio, nr_segs);
bio              1996 block/blk-mq.c 	plug = blk_mq_plug(q, bio);
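
Everything in blk_mq_make_request() above (bounce, split, integrity prep, plug and scheduler merging, rq_qos throttling, tag allocation) runs underneath an ordinary submit_bio(). A sketch of the producer side that triggers this path, assuming ~v5.4 APIs and placeholder bdev/page arguments:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /*
     * Sketch: build one bio and push it into the queue. submit_bio()
     * funnels into generic_make_request(), which invokes the queue's
     * make_request function, i.e. blk_mq_make_request() above for a
     * blk-mq queue.
     */
    static int example_write_page(struct block_device *bdev,
                                  struct page *page, sector_t sector)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);
            int ret;

            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio_add_page(bio, page, PAGE_SIZE, 0);
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);

            ret = submit_bio_wait(bio);     /* synchronous for brevity */
            bio_put(bio);
            return ret;
    }
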
bio               256 block/blk-mq.h 					   struct bio *bio)
bio               262 block/blk-mq.h 	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
bio                32 block/blk-rq-qos.c void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
bio                36 block/blk-rq-qos.c 			rqos->ops->cleanup(rqos, bio);
bio                68 block/blk-rq-qos.c void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
bio                72 block/blk-rq-qos.c 			rqos->ops->throttle(rqos, bio);
bio                77 block/blk-rq-qos.c void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
bio                81 block/blk-rq-qos.c 			rqos->ops->track(rqos, rq, bio);
bio                86 block/blk-rq-qos.c void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
bio                90 block/blk-rq-qos.c 			rqos->ops->merge(rqos, rq, bio);
bio                95 block/blk-rq-qos.c void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
bio                99 block/blk-rq-qos.c 			rqos->ops->done_bio(rqos, bio);
bio                37 block/blk-rq-qos.h 	void (*throttle)(struct rq_qos *, struct bio *);
bio                38 block/blk-rq-qos.h 	void (*track)(struct rq_qos *, struct request *, struct bio *);
bio                39 block/blk-rq-qos.h 	void (*merge)(struct rq_qos *, struct request *, struct bio *);
bio                43 block/blk-rq-qos.h 	void (*done_bio)(struct rq_qos *, struct bio *);
bio                44 block/blk-rq-qos.h 	void (*cleanup)(struct rq_qos *, struct bio *);
bio               134 block/blk-rq-qos.h void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
bio               138 block/blk-rq-qos.h void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
bio               139 block/blk-rq-qos.h void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
bio               140 block/blk-rq-qos.h void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
bio               141 block/blk-rq-qos.h void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
bio               144 block/blk-rq-qos.h static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
bio               147 block/blk-rq-qos.h 		__rq_qos_cleanup(q->rq_qos, bio);
bio               168 block/blk-rq-qos.h static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
bio               171 block/blk-rq-qos.h 		__rq_qos_done_bio(q->rq_qos, bio);
bio               174 block/blk-rq-qos.h static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
bio               180 block/blk-rq-qos.h 	bio_set_flag(bio, BIO_TRACKED);
bio               182 block/blk-rq-qos.h 		__rq_qos_throttle(q->rq_qos, bio);
bio               186 block/blk-rq-qos.h 				struct bio *bio)
bio               189 block/blk-rq-qos.h 		__rq_qos_track(q->rq_qos, rq, bio);
bio               193 block/blk-rq-qos.h 				struct bio *bio)
bio               196 block/blk-rq-qos.h 		__rq_qos_merge(q->rq_qos, rq, bio);
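
The __rq_qos_*() helpers above walk a singly linked chain of policies hung off q->rq_qos and invoke only the ops each policy fills in. A sketch of how a hypothetical policy would plug into that chain (the example_ names are not in the tree; error handling and the policy's id assignment are elided):

    /*
     * Sketch: a hypothetical rq_qos policy. Hooks left NULL are
     * simply skipped by the __rq_qos_*() walkers above.
     */
    static void example_throttle(struct rq_qos *rqos, struct bio *bio)
    {
            /* block here while the bio exceeds the policy's budget */
    }

    static void example_done_bio(struct rq_qos *rqos, struct bio *bio)
    {
            /* account per-bio completion, e.g. latency */
    }

    static struct rq_qos_ops example_ops = {
            .throttle = example_throttle,
            .done_bio = example_done_bio,
    };

    static void example_register(struct request_queue *q, struct rq_qos *rqos)
    {
            rqos->q   = q;
            rqos->ops = &example_ops;
            rq_qos_add(q, rqos);            /* links rqos at the chain head */
    }
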
bio               381 block/blk-throttle.c static inline unsigned int throtl_bio_data_size(struct bio *bio)
bio               384 block/blk-throttle.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
bio               386 block/blk-throttle.c 	return bio->bi_iter.bi_size;
bio               406 block/blk-throttle.c static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
bio               409 block/blk-throttle.c 	bio_list_add(&qn->bios, bio);
bio               420 block/blk-throttle.c static struct bio *throtl_peek_queued(struct list_head *queued)
bio               423 block/blk-throttle.c 	struct bio *bio;
bio               428 block/blk-throttle.c 	bio = bio_list_peek(&qn->bios);
bio               429 block/blk-throttle.c 	WARN_ON_ONCE(!bio);
bio               430 block/blk-throttle.c 	return bio;
bio               447 block/blk-throttle.c static struct bio *throtl_pop_queued(struct list_head *queued,
bio               451 block/blk-throttle.c 	struct bio *bio;
bio               456 block/blk-throttle.c 	bio = bio_list_pop(&qn->bios);
bio               457 block/blk-throttle.c 	WARN_ON_ONCE(!bio);
bio               469 block/blk-throttle.c 	return bio;
bio               878 block/blk-throttle.c static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
bio               881 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio               920 block/blk-throttle.c static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
bio               923 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio               926 block/blk-throttle.c 	unsigned int bio_size = throtl_bio_data_size(bio);
bio               967 block/blk-throttle.c static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
bio               970 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio               980 block/blk-throttle.c 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
bio              1006 block/blk-throttle.c 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
bio              1007 block/blk-throttle.c 	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
bio              1024 block/blk-throttle.c static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
bio              1026 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio              1027 block/blk-throttle.c 	unsigned int bio_size = throtl_bio_data_size(bio);
bio              1041 block/blk-throttle.c 	if (!bio_flagged(bio, BIO_THROTTLED))
bio              1042 block/blk-throttle.c 		bio_set_flag(bio, BIO_THROTTLED);
bio              1054 block/blk-throttle.c static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
bio              1058 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio              1072 block/blk-throttle.c 	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
bio              1082 block/blk-throttle.c 	struct bio *bio;
bio              1084 block/blk-throttle.c 	bio = throtl_peek_queued(&sq->queued[READ]);
bio              1085 block/blk-throttle.c 	if (bio)
bio              1086 block/blk-throttle.c 		tg_may_dispatch(tg, bio, &read_wait);
bio              1088 block/blk-throttle.c 	bio = throtl_peek_queued(&sq->queued[WRITE]);
bio              1089 block/blk-throttle.c 	if (bio)
bio              1090 block/blk-throttle.c 		tg_may_dispatch(tg, bio, &write_wait);
bio              1120 block/blk-throttle.c 	struct bio *bio;
bio              1128 block/blk-throttle.c 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
bio              1131 block/blk-throttle.c 	throtl_charge_bio(tg, bio);
bio              1141 block/blk-throttle.c 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
bio              1144 block/blk-throttle.c 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
bio              1162 block/blk-throttle.c 	struct bio *bio;
bio              1166 block/blk-throttle.c 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
bio              1167 block/blk-throttle.c 	       tg_may_dispatch(tg, bio, NULL)) {
bio              1169 block/blk-throttle.c 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
bio              1176 block/blk-throttle.c 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
bio              1177 block/blk-throttle.c 	       tg_may_dispatch(tg, bio, NULL)) {
bio              1179 block/blk-throttle.c 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
bio              1310 block/blk-throttle.c 	struct bio *bio;
bio              1318 block/blk-throttle.c 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
bio              1319 block/blk-throttle.c 			bio_list_add(&bio_list_on_stack, bio);
bio              1324 block/blk-throttle.c 		while((bio = bio_list_pop(&bio_list_on_stack)))
bio              1325 block/blk-throttle.c 			generic_make_request(bio);
bio              2118 block/blk-throttle.c 		    struct bio *bio)
bio              2123 block/blk-throttle.c 	bool rw = bio_data_dir(bio);
bio              2130 block/blk-throttle.c 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
bio              2152 block/blk-throttle.c 		if (!tg_may_dispatch(tg, bio, NULL)) {
bio              2162 block/blk-throttle.c 		throtl_charge_bio(tg, bio);
bio              2192 block/blk-throttle.c 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
bio              2200 block/blk-throttle.c 	throtl_add_bio_tg(bio, qn, tg);
bio              2217 block/blk-throttle.c 	bio_set_flag(bio, BIO_THROTTLED);
bio              2221 block/blk-throttle.c 		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
bio              2255 block/blk-throttle.c void blk_throtl_bio_endio(struct bio *bio)
bio              2263 block/blk-throttle.c 	int rw = bio_data_dir(bio);
bio              2265 block/blk-throttle.c 	blkg = bio->bi_blkg;
bio              2273 block/blk-throttle.c 	start_time = bio_issue_time(&bio->bi_issue) >> 10;
bio              2280 block/blk-throttle.c 	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
bio              2281 block/blk-throttle.c 		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
bio              2282 block/blk-throttle.c 				     bio_op(bio), lat);
bio              2288 block/blk-throttle.c 		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
bio              2319 block/blk-throttle.c 		struct bio *bio;
bio              2323 block/blk-throttle.c 		while ((bio = throtl_peek_queued(&sq->queued[READ])))
bio              2324 block/blk-throttle.c 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
bio              2325 block/blk-throttle.c 		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
bio              2326 block/blk-throttle.c 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
bio              2342 block/blk-throttle.c 	struct bio *bio;
bio              2364 block/blk-throttle.c 		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
bio              2366 block/blk-throttle.c 			generic_make_request(bio);
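
throtl_pop_queued() and the drain path above use a collect-then-reissue idiom: bios are moved to an on-stack bio_list under the queue lock, then resubmitted after the lock is dropped so generic_make_request() never runs with the lock held. A distilled sketch:

    /*
     * Sketch of the drain idiom in blk_throtl_drain() above: move
     * queued bios to an on-stack list while serialized, then reissue
     * them with no locks held.
     */
    static void example_drain(struct bio_list *queued)
    {
            struct bio_list on_stack;
            struct bio *bio;

            bio_list_init(&on_stack);
            while ((bio = bio_list_pop(queued)))
                    bio_list_add(&on_stack, bio);

            /* the real code drops the queue lock at this point */
            while ((bio = bio_list_pop(&on_stack)))
                    generic_make_request(bio);
    }
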
bio               529 block/blk-wbt.c static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
bio               531 block/blk-wbt.c 	switch (bio_op(bio)) {
bio               536 block/blk-wbt.c 		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
bio               547 block/blk-wbt.c static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
bio               554 block/blk-wbt.c 	if (bio_op(bio) == REQ_OP_READ) {
bio               556 block/blk-wbt.c 	} else if (wbt_should_throttle(rwb, bio)) {
bio               559 block/blk-wbt.c 		if (bio_op(bio) == REQ_OP_DISCARD)
bio               566 block/blk-wbt.c static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
bio               569 block/blk-wbt.c 	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
bio               579 block/blk-wbt.c static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
bio               584 block/blk-wbt.c 	flags = bio_to_wbt_flags(rwb, bio);
bio               591 block/blk-wbt.c 	__wbt_wait(rwb, flags, bio->bi_opf);
bio               597 block/blk-wbt.c static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
bio               600 block/blk-wbt.c 	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
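
wbt_should_throttle() above encodes which bios writeback throttling cares about: discards and buffered writes, but not reads and not O_DIRECT writes (which carry both REQ_SYNC and REQ_IDLE). A simplified restatement:

    /* Sketch of the test in wbt_should_throttle(), simplified. */
    static bool example_wbt_should_throttle(struct bio *bio)
    {
            switch (bio_op(bio)) {
            case REQ_OP_DISCARD:
                    return true;
            case REQ_OP_WRITE:
                    /* O_DIRECT writes carry REQ_SYNC | REQ_IDLE: skip them */
                    return (bio->bi_opf & (REQ_SYNC | REQ_IDLE)) !=
                           (REQ_SYNC | REQ_IDLE);
            default:
                    return false;
            }
    }
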
bio               211 block/blk-zoned.c 	struct bio *bio = bio_alloc(gfp_mask, 0);
bio               215 block/blk-zoned.c 	bio_set_dev(bio, bdev);
bio               216 block/blk-zoned.c 	bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
bio               218 block/blk-zoned.c 	ret = submit_bio_wait(bio);
bio               219 block/blk-zoned.c 	bio_put(bio);
bio               260 block/blk-zoned.c 	struct bio *bio = NULL;
bio               289 block/blk-zoned.c 		bio = blk_next_bio(bio, 0, gfp_mask);
bio               290 block/blk-zoned.c 		bio->bi_iter.bi_sector = sector;
bio               291 block/blk-zoned.c 		bio_set_dev(bio, bdev);
bio               292 block/blk-zoned.c 		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
bio               301 block/blk-zoned.c 	ret = submit_bio_wait(bio);
bio               302 block/blk-zoned.c 	bio_put(bio);
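
The blkdev_reset_zones() lines above show the chained-bio pattern: blk_next_bio() (an in-tree helper declared in block/blk.h, visible further down this listing) allocates a fresh bio chained to the previous one and submits the old one, so a single submit_bio_wait() at the end waits for the whole batch. A sketch with placeholder zone geometry:

    /* Sketch: issue one REQ_OP_ZONE_RESET per zone, chained. */
    static int example_reset_zones(struct block_device *bdev,
                                   sector_t start, sector_t zone_sectors,
                                   unsigned int nr_zones)
    {
            struct bio *bio = NULL;
            unsigned int i;
            int ret;

            for (i = 0; i < nr_zones; i++) {
                    bio = blk_next_bio(bio, 0, GFP_KERNEL);
                    bio->bi_iter.bi_sector = start + i * zone_sectors;
                    bio_set_dev(bio, bdev);
                    bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
            }

            /* waits for the last bio, and through chaining, all of them */
            ret = submit_bio_wait(bio);
            bio_put(bio);
            return ret;
    }
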
bio               110 block/blk.h    static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
bio               114 block/blk.h    	rq->__data_len = bio->bi_iter.bi_size;
bio               115 block/blk.h    	rq->bio = rq->biotail = bio;
bio               116 block/blk.h    	rq->ioprio = bio_prio(bio);
bio               118 block/blk.h    	if (bio->bi_disk)
bio               119 block/blk.h    		rq->rq_disk = bio->bi_disk;
bio               124 block/blk.h    bool __bio_integrity_endio(struct bio *);
bio               125 block/blk.h    void bio_integrity_free(struct bio *bio);
bio               126 block/blk.h    static inline bool bio_integrity_endio(struct bio *bio)
bio               128 block/blk.h    	if (bio_integrity(bio))
bio               129 block/blk.h    		return __bio_integrity_endio(bio);
bio               134 block/blk.h    		struct bio *next)
bio               136 block/blk.h    	struct bio_integrity_payload *bip = bio_integrity(req->bio);
bio               144 block/blk.h    		struct bio *bio)
bio               146 block/blk.h    	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               147 block/blk.h    	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
bio               154 block/blk.h    		struct bio *next)
bio               159 block/blk.h    		struct bio *bio)
bio               167 block/blk.h    static inline bool bio_integrity_endio(struct bio *bio)
bio               171 block/blk.h    static inline void bio_integrity_free(struct bio *bio)
bio               179 block/blk.h    bool bio_attempt_front_merge(struct request *req, struct bio *bio,
bio               181 block/blk.h    bool bio_attempt_back_merge(struct request *req, struct bio *bio,
bio               184 block/blk.h    		struct bio *bio);
bio               185 block/blk.h    bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
bio               229 block/blk.h    void __blk_queue_split(struct request_queue *q, struct bio **bio,
bio               231 block/blk.h    int ll_back_merge_fn(struct request *req, struct bio *bio,
bio               233 block/blk.h    int ll_front_merge_fn(struct request *req,  struct bio *bio,
bio               241 block/blk.h    bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
bio               242 block/blk.h    enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
bio               326 block/blk.h    extern void blk_throtl_bio_endio(struct bio *bio);
bio               329 block/blk.h    static inline void blk_throtl_bio_endio(struct bio *bio) { }
bio               335 block/blk.h    extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
bio               341 block/blk.h    static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
bio               352 block/blk.h    struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
bio               132 block/bounce.c static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
bio               162 block/bounce.c static void bounce_end_io(struct bio *bio, mempool_t *pool)
bio               164 block/bounce.c 	struct bio *bio_orig = bio->bi_private;
bio               172 block/bounce.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               181 block/bounce.c 	bio_orig->bi_status = bio->bi_status;
bio               183 block/bounce.c 	bio_put(bio);
bio               186 block/bounce.c static void bounce_end_io_write(struct bio *bio)
bio               188 block/bounce.c 	bounce_end_io(bio, &page_pool);
bio               191 block/bounce.c static void bounce_end_io_write_isa(struct bio *bio)
bio               194 block/bounce.c 	bounce_end_io(bio, &isa_page_pool);
bio               197 block/bounce.c static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
bio               199 block/bounce.c 	struct bio *bio_orig = bio->bi_private;
bio               201 block/bounce.c 	if (!bio->bi_status)
bio               202 block/bounce.c 		copy_to_high_bio_irq(bio_orig, bio);
bio               204 block/bounce.c 	bounce_end_io(bio, pool);
bio               207 block/bounce.c static void bounce_end_io_read(struct bio *bio)
bio               209 block/bounce.c 	__bounce_end_io_read(bio, &page_pool);
bio               212 block/bounce.c static void bounce_end_io_read_isa(struct bio *bio)
bio               214 block/bounce.c 	__bounce_end_io_read(bio, &isa_page_pool);
bio               217 block/bounce.c static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
bio               222 block/bounce.c 	struct bio *bio;
bio               246 block/bounce.c 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
bio               247 block/bounce.c 	if (!bio)
bio               249 block/bounce.c 	bio->bi_disk		= bio_src->bi_disk;
bio               250 block/bounce.c 	bio->bi_opf		= bio_src->bi_opf;
bio               251 block/bounce.c 	bio->bi_ioprio		= bio_src->bi_ioprio;
bio               252 block/bounce.c 	bio->bi_write_hint	= bio_src->bi_write_hint;
bio               253 block/bounce.c 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
bio               254 block/bounce.c 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
bio               256 block/bounce.c 	switch (bio_op(bio)) {
bio               262 block/bounce.c 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
bio               266 block/bounce.c 			bio->bi_io_vec[bio->bi_vcnt++] = bv;
bio               273 block/bounce.c 		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
bio               275 block/bounce.c 			bio_put(bio);
bio               280 block/bounce.c 	bio_clone_blkg_association(bio, bio_src);
bio               281 block/bounce.c 	blkcg_bio_issue_init(bio);
bio               283 block/bounce.c 	return bio;
bio               286 block/bounce.c static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
bio               289 block/bounce.c 	struct bio *bio;
bio               308 block/bounce.c 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio               309 block/bounce.c 		bio_chain(bio, *bio_orig);
bio               311 block/bounce.c 		*bio_orig = bio;
bio               313 block/bounce.c 	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
bio               321 block/bounce.c 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
bio               344 block/bounce.c 	bio->bi_flags |= (1 << BIO_BOUNCED);
bio               347 block/bounce.c 		bio->bi_end_io = bounce_end_io_write;
bio               349 block/bounce.c 			bio->bi_end_io = bounce_end_io_read;
bio               351 block/bounce.c 		bio->bi_end_io = bounce_end_io_write_isa;
bio               353 block/bounce.c 			bio->bi_end_io = bounce_end_io_read_isa;
bio               356 block/bounce.c 	bio->bi_private = *bio_orig;
bio               357 block/bounce.c 	*bio_orig = bio;
bio               360 block/bounce.c void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
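
bounce_end_io() above completes the original bio from the clone's completion: the clone carries the original in ->bi_private, the status is copied across, and the original is ended. The bare pattern, as a sketch:

    /*
     * Sketch of the clone-completion pattern bounce uses above:
     * stash the original bio in the clone's ->bi_private, copy the
     * status across in the clone's end_io, then complete the original.
     */
    static void example_clone_endio(struct bio *clone)
    {
            struct bio *orig = clone->bi_private;

            orig->bi_status = clone->bi_status;
            bio_put(clone);
            bio_endio(orig);
    }
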
bio                60 block/bsg-lib.c 		job->bidi_bio = job->bidi_rq->bio;
bio               228 block/bsg-lib.c 	if (req->bio) {
bio               138 block/bsg.c    	struct bio *bio;
bio               182 block/bsg.c    	bio = rq->bio;
bio               186 block/bsg.c    	blk_rq_unmap_user(bio);
bio                60 block/elevator.c static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
bio                66 block/elevator.c 		return e->type->ops.allow_merge(q, rq, bio);
bio                74 block/elevator.c bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
bio                76 block/elevator.c 	if (!blk_rq_merge_ok(rq, bio))
bio                79 block/elevator.c 	if (!elv_iosched_allow_bio_merge(rq, bio))
bio               305 block/elevator.c 		struct bio *bio)
bio               316 block/elevator.c 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
bio               322 block/elevator.c 	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
bio               323 block/elevator.c 		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
bio               337 block/elevator.c 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
bio               338 block/elevator.c 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
bio               344 block/elevator.c 		return e->type->ops.request_merge(q, req, bio);
bio               565 block/kyber-iosched.c static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
bio               571 block/kyber-iosched.c 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
bio               576 block/kyber-iosched.c 	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
bio               582 block/kyber-iosched.c static void kyber_prepare_request(struct request *rq, struct bio *bio)
bio               440 block/mq-deadline.c 			    struct bio *bio)
bio               443 block/mq-deadline.c 	sector_t sector = bio_end_sector(bio);
bio               449 block/mq-deadline.c 	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
bio               453 block/mq-deadline.c 		if (elv_bio_merge_ok(__rq, bio)) {
bio               462 block/mq-deadline.c static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
bio               471 block/mq-deadline.c 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
bio               544 block/mq-deadline.c static void dd_prepare_request(struct request *rq, struct bio *bio)
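
dd_bio_merge() above shows the standard shape of an elevator's ->bio_merge hook: serialize against the scheduler's own state, delegate the actual work to blk_mq_sched_try_merge(), and free any request that a merge made redundant. A sketch with hypothetical elevator data (example_data is not a real structure):

    /* Hypothetical per-queue elevator data; stands in for
     * deadline_data/bfq_data in the real schedulers. */
    struct example_data {
            spinlock_t lock;
    };

    static bool example_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
                                  unsigned int nr_segs)
    {
            struct request_queue *q = hctx->queue;
            struct example_data *ed = q->elevator->elevator_data;
            struct request *free = NULL;
            bool ret;

            spin_lock(&ed->lock);
            ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
            spin_unlock(&ed->lock);

            if (free)
                    blk_mq_free_request(free);
            return ret;
    }
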
bio                54 block/partition-generic.c const char *bio_devname(struct bio *bio, char *buf)
bio                56 block/partition-generic.c 	return disk_name(bio->bi_disk, bio->bi_partno, buf);
bio               243 block/scsi_ioctl.c 				 struct bio *bio)
bio               271 block/scsi_ioctl.c 	r = blk_rq_unmap_user(bio);
bio               287 block/scsi_ioctl.c 	struct bio *bio;
bio               348 block/scsi_ioctl.c 	bio = rq->bio;
bio               361 block/scsi_ioctl.c 	ret = blk_complete_sghdr_rq(rq, hdr, bio);
bio               136 block/t10-pi.c 	struct bio *bio;
bio               138 block/t10-pi.c 	__rq_for_each_bio(bio, rq) {
bio               139 block/t10-pi.c 		struct bio_integrity_payload *bip = bio_integrity(bio);
bio               188 block/t10-pi.c 	struct bio *bio;
bio               190 block/t10-pi.c 	__rq_for_each_bio(bio, rq) {
bio               191 block/t10-pi.c 		struct bio_integrity_payload *bip = bio_integrity(bio);
bio              1474 drivers/block/amiflop.c 		data = bio_data(rq->bio) + 512 * cnt;
bio               109 drivers/block/aoe/aoe.h 	struct bio *bio;
bio               183 drivers/block/aoe/aoe.h 		struct bio *nxbio;
bio               297 drivers/block/aoe/aoecmd.c skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
bio               302 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter)
bio               352 drivers/block/aoe/aoecmd.c 	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
bio               353 drivers/block/aoe/aoecmd.c 		skb_fillup(skb, f->buf->bio, f->iter);
bio               389 drivers/block/aoe/aoecmd.c 	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
bio               826 drivers/block/aoe/aoecmd.c bufinit(struct buf *buf, struct request *rq, struct bio *bio)
bio               830 drivers/block/aoe/aoecmd.c 	buf->bio = bio;
bio               831 drivers/block/aoe/aoecmd.c 	buf->iter = bio->bi_iter;
bio               841 drivers/block/aoe/aoecmd.c 	struct bio *bio;
bio               857 drivers/block/aoe/aoecmd.c 		d->ip.nxbio = rq->bio;
bio               861 drivers/block/aoe/aoecmd.c 		__rq_for_each_bio(bio, rq)
bio               869 drivers/block/aoe/aoecmd.c 	bio = d->ip.nxbio;
bio               870 drivers/block/aoe/aoecmd.c 	bufinit(buf, rq, bio);
bio               871 drivers/block/aoe/aoecmd.c 	bio = bio->bi_next;
bio               872 drivers/block/aoe/aoecmd.c 	d->ip.nxbio = bio;
bio               873 drivers/block/aoe/aoecmd.c 	if (bio == NULL)
bio              1023 drivers/block/aoe/aoecmd.c bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
bio              1030 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter) {
bio              1041 drivers/block/aoe/aoecmd.c 	struct bio *bio;
bio              1050 drivers/block/aoe/aoecmd.c 		bio = rq->bio;
bio              1051 drivers/block/aoe/aoecmd.c 		bok = !fastfail && !bio->bi_status;
bio              1054 drivers/block/aoe/aoecmd.c 	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
bio              1113 drivers/block/aoe/aoecmd.c 			buf->bio->bi_status = BLK_STS_IOERR;
bio              1126 drivers/block/aoe/aoecmd.c 			buf->bio->bi_status = BLK_STS_IOERR;
bio              1134 drivers/block/aoe/aoecmd.c 			buf->bio->bi_status = BLK_STS_IOERR;
bio              1137 drivers/block/aoe/aoecmd.c 		bvcpy(skb, f->buf->bio, f->iter, n);
bio              1635 drivers/block/aoe/aoecmd.c 	buf->bio->bi_status = BLK_STS_IOERR;
bio               164 drivers/block/aoe/aoedev.c 	struct bio *bio;
bio               172 drivers/block/aoe/aoedev.c 	while ((bio = d->ip.nxbio)) {
bio               173 drivers/block/aoe/aoedev.c 		bio->bi_status = BLK_STS_IOERR;
bio               174 drivers/block/aoe/aoedev.c 		d->ip.nxbio = bio->bi_next;
bio              1546 drivers/block/ataflop.c 	ReqBuffer = bio_data(fd_request->bio);
bio               285 drivers/block/brd.c static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
bio               287 drivers/block/brd.c 	struct brd_device *brd = bio->bi_disk->private_data;
bio               292 drivers/block/brd.c 	sector = bio->bi_iter.bi_sector;
bio               293 drivers/block/brd.c 	if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
bio               296 drivers/block/brd.c 	bio_for_each_segment(bvec, bio, iter) {
bio               301 drivers/block/brd.c 				  bio_op(bio), sector);
bio               307 drivers/block/brd.c 	bio_endio(bio);
bio               310 drivers/block/brd.c 	bio_io_error(bio);
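
brd_make_request() above is the canonical bio-based ->make_request_fn: validate the range, walk each segment with bio_for_each_segment(), move the data, and complete the bio directly since no request is ever formed. Its skeleton, with a hypothetical per-segment worker:

    /* Hypothetical per-segment worker: moves bv_len bytes at
     * (page, offset) to/from backing storage at 'sector'. */
    static int example_do_bvec(struct page *page, unsigned int len,
                               unsigned int off, bool is_write,
                               sector_t sector);

    static blk_qc_t example_make_request(struct request_queue *q,
                                         struct bio *bio)
    {
            struct bio_vec bvec;
            struct bvec_iter iter;
            sector_t sector = bio->bi_iter.bi_sector;

            bio_for_each_segment(bvec, bio, iter) {
                    if (example_do_bvec(bvec.bv_page, bvec.bv_len,
                                        bvec.bv_offset,
                                        bio_data_dir(bio) == WRITE,
                                        sector) < 0) {
                            bio_io_error(bio);
                            return BLK_QC_T_NONE;
                    }
                    sector += bvec.bv_len >> SECTOR_SHIFT;
            }

            bio_endio(bio);
            return BLK_QC_T_NONE;
    }
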
bio               129 drivers/block/drbd/drbd_actlog.c 	struct bio *bio;
bio               141 drivers/block/drbd/drbd_actlog.c 	bio = bio_alloc_drbd(GFP_NOIO);
bio               142 drivers/block/drbd/drbd_actlog.c 	bio_set_dev(bio, bdev->md_bdev);
bio               143 drivers/block/drbd/drbd_actlog.c 	bio->bi_iter.bi_sector = sector;
bio               145 drivers/block/drbd/drbd_actlog.c 	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
bio               147 drivers/block/drbd/drbd_actlog.c 	bio->bi_private = device;
bio               148 drivers/block/drbd/drbd_actlog.c 	bio->bi_end_io = drbd_md_endio;
bio               149 drivers/block/drbd/drbd_actlog.c 	bio_set_op_attrs(bio, op, op_flags);
bio               161 drivers/block/drbd/drbd_actlog.c 	bio_get(bio); /* one bio_put() is in the completion handler */
bio               165 drivers/block/drbd/drbd_actlog.c 		bio_io_error(bio);
bio               167 drivers/block/drbd/drbd_actlog.c 		submit_bio(bio);
bio               169 drivers/block/drbd/drbd_actlog.c 	if (!bio->bi_status)
bio               173 drivers/block/drbd/drbd_actlog.c 	bio_put(bio);
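
The drbd_actlog.c lines above issue a single-page metadata write and hold an extra reference so the submitter can still read bi_status after the end_io handler has run ("one bio_put() is in the completion handler"). A self-contained sketch of that get/put discipline, assuming ~v5.4 and placeholder arguments:

    #include <linux/bio.h>
    #include <linux/completion.h>

    static void example_md_endio(struct bio *bio)
    {
            struct completion *done = bio->bi_private;

            complete(done);
            bio_put(bio);           /* the put that lives in the handler */
    }

    static int example_md_write(struct block_device *bdev,
                                struct page *page, sector_t sector)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct bio *bio = bio_alloc(GFP_NOIO, 1);
            int err;

            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_private = &done;
            bio->bi_end_io  = example_md_endio;
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_FUA);

            bio_get(bio);           /* keep bio alive past end_io */
            submit_bio(bio);
            wait_for_completion(&done);

            err = blk_status_to_errno(bio->bi_status);
            bio_put(bio);           /* pairs with bio_get() above */
            return err;
    }
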
bio               939 drivers/block/drbd/drbd_bitmap.c static void drbd_bm_endio(struct bio *bio)
bio               941 drivers/block/drbd/drbd_bitmap.c 	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
bio               944 drivers/block/drbd/drbd_bitmap.c 	unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
bio               950 drivers/block/drbd/drbd_bitmap.c 	if (bio->bi_status) {
bio               953 drivers/block/drbd/drbd_bitmap.c 		ctx->error = blk_status_to_errno(bio->bi_status);
bio               959 drivers/block/drbd/drbd_bitmap.c 					bio->bi_status, idx);
bio               968 drivers/block/drbd/drbd_bitmap.c 		mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
bio               970 drivers/block/drbd/drbd_bitmap.c 	bio_put(bio);
bio               981 drivers/block/drbd/drbd_bitmap.c 	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
bio              1011 drivers/block/drbd/drbd_bitmap.c 	bio_set_dev(bio, device->ldev->md_bdev);
bio              1012 drivers/block/drbd/drbd_bitmap.c 	bio->bi_iter.bi_sector = on_disk_sector;
bio              1015 drivers/block/drbd/drbd_bitmap.c 	bio_add_page(bio, page, len, 0);
bio              1016 drivers/block/drbd/drbd_bitmap.c 	bio->bi_private = ctx;
bio              1017 drivers/block/drbd/drbd_bitmap.c 	bio->bi_end_io = drbd_bm_endio;
bio              1018 drivers/block/drbd/drbd_bitmap.c 	bio_set_op_attrs(bio, op, 0);
bio              1021 drivers/block/drbd/drbd_bitmap.c 		bio_io_error(bio);
bio              1023 drivers/block/drbd/drbd_bitmap.c 		submit_bio(bio);
bio               282 drivers/block/drbd/drbd_int.h 	struct bio *private_bio;
bio               297 drivers/block/drbd/drbd_int.h 	struct bio *master_bio;       /* master bio pointer */
bio              1427 drivers/block/drbd/drbd_int.h extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
bio              1453 drivers/block/drbd/drbd_int.h extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
bio              1454 drivers/block/drbd/drbd_int.h extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
bio              1491 drivers/block/drbd/drbd_int.h extern void drbd_md_endio(struct bio *bio);
bio              1492 drivers/block/drbd/drbd_int.h extern void drbd_peer_request_endio(struct bio *bio);
bio              1493 drivers/block/drbd/drbd_int.h extern void drbd_request_endio(struct bio *bio);
bio              1522 drivers/block/drbd/drbd_int.h extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
bio              1608 drivers/block/drbd/drbd_int.h 					     int fault_type, struct bio *bio)
bio              1611 drivers/block/drbd/drbd_int.h 	if (!bio->bi_disk) {
bio              1613 drivers/block/drbd/drbd_int.h 		bio->bi_status = BLK_STS_IOERR;
bio              1614 drivers/block/drbd/drbd_int.h 		bio_endio(bio);
bio              1619 drivers/block/drbd/drbd_int.h 		bio_io_error(bio);
bio              1621 drivers/block/drbd/drbd_int.h 		generic_make_request(bio);
bio               140 drivers/block/drbd/drbd_main.c struct bio *bio_alloc_drbd(gfp_t gfp_mask)
bio               142 drivers/block/drbd/drbd_main.c 	struct bio *bio;
bio               147 drivers/block/drbd/drbd_main.c 	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
bio               148 drivers/block/drbd/drbd_main.c 	if (!bio)
bio               150 drivers/block/drbd/drbd_main.c 	return bio;
bio              1588 drivers/block/drbd/drbd_main.c static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
bio              1594 drivers/block/drbd/drbd_main.c 	bio_for_each_segment(bvec, bio, iter) {
bio              1604 drivers/block/drbd/drbd_main.c 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
bio              1610 drivers/block/drbd/drbd_main.c static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
bio              1616 drivers/block/drbd/drbd_main.c 	bio_for_each_segment(bvec, bio, iter) {
bio              1625 drivers/block/drbd/drbd_main.c 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
bio              1652 drivers/block/drbd/drbd_main.c 			     struct bio *bio)
bio              1655 drivers/block/drbd/drbd_main.c 		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
bio              1656 drivers/block/drbd/drbd_main.c 			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
bio              1657 drivers/block/drbd/drbd_main.c 			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
bio              1658 drivers/block/drbd/drbd_main.c 			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
bio              1659 drivers/block/drbd/drbd_main.c 			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
bio              1660 drivers/block/drbd/drbd_main.c 			(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
bio              1662 drivers/block/drbd/drbd_main.c 			   (DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
bio              1666 drivers/block/drbd/drbd_main.c 		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
bio              2294 drivers/block/drbd/drbd_main.c 		struct bio *bio = req->master_bio;
bio              2330 drivers/block/drbd/drbd_main.c 		__drbd_make_request(device, bio, start_jif);
bio              1256 drivers/block/drbd/drbd_receiver.c static void one_flush_endio(struct bio *bio)
bio              1258 drivers/block/drbd/drbd_receiver.c 	struct one_flush_context *octx = bio->bi_private;
bio              1262 drivers/block/drbd/drbd_receiver.c 	if (bio->bi_status) {
bio              1263 drivers/block/drbd/drbd_receiver.c 		ctx->error = blk_status_to_errno(bio->bi_status);
bio              1264 drivers/block/drbd/drbd_receiver.c 		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
bio              1267 drivers/block/drbd/drbd_receiver.c 	bio_put(bio);
bio              1279 drivers/block/drbd/drbd_receiver.c 	struct bio *bio = bio_alloc(GFP_NOIO, 0);
bio              1281 drivers/block/drbd/drbd_receiver.c 	if (!bio || !octx) {
bio              1287 drivers/block/drbd/drbd_receiver.c 		if (bio)
bio              1288 drivers/block/drbd/drbd_receiver.c 			bio_put(bio);
bio              1298 drivers/block/drbd/drbd_receiver.c 	bio_set_dev(bio, device->ldev->backing_bdev);
bio              1299 drivers/block/drbd/drbd_receiver.c 	bio->bi_private = octx;
bio              1300 drivers/block/drbd/drbd_receiver.c 	bio->bi_end_io = one_flush_endio;
bio              1301 drivers/block/drbd/drbd_receiver.c 	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
bio              1306 drivers/block/drbd/drbd_receiver.c 	submit_bio(bio);
bio              1641 drivers/block/drbd/drbd_receiver.c 	struct bio *bios = NULL;
bio              1642 drivers/block/drbd/drbd_receiver.c 	struct bio *bio;
bio              1689 drivers/block/drbd/drbd_receiver.c 	bio = bio_alloc(GFP_NOIO, nr_pages);
bio              1690 drivers/block/drbd/drbd_receiver.c 	if (!bio) {
bio              1695 drivers/block/drbd/drbd_receiver.c 	bio->bi_iter.bi_sector = sector;
bio              1696 drivers/block/drbd/drbd_receiver.c 	bio_set_dev(bio, device->ldev->backing_bdev);
bio              1697 drivers/block/drbd/drbd_receiver.c 	bio_set_op_attrs(bio, op, op_flags);
bio              1698 drivers/block/drbd/drbd_receiver.c 	bio->bi_private = peer_req;
bio              1699 drivers/block/drbd/drbd_receiver.c 	bio->bi_end_io = drbd_peer_request_endio;
bio              1701 drivers/block/drbd/drbd_receiver.c 	bio->bi_next = bios;
bio              1702 drivers/block/drbd/drbd_receiver.c 	bios = bio;
bio              1707 drivers/block/drbd/drbd_receiver.c 		if (!bio_add_page(bio, page, len, 0))
bio              1721 drivers/block/drbd/drbd_receiver.c 		bio = bios;
bio              1723 drivers/block/drbd/drbd_receiver.c 		bio->bi_next = NULL;
bio              1725 drivers/block/drbd/drbd_receiver.c 		drbd_generic_make_request(device, fault_type, bio);
bio              1731 drivers/block/drbd/drbd_receiver.c 		bio = bios;
bio              1733 drivers/block/drbd/drbd_receiver.c 		bio_put(bio);
bio              2013 drivers/block/drbd/drbd_receiver.c 	struct bio *bio;
bio              2031 drivers/block/drbd/drbd_receiver.c 	bio = req->master_bio;
bio              2032 drivers/block/drbd/drbd_receiver.c 	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
bio              2034 drivers/block/drbd/drbd_receiver.c 	bio_for_each_segment(bvec, bio, iter) {
bio              2045 drivers/block/drbd/drbd_receiver.c 		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
bio              2469 drivers/block/drbd/drbd_receiver.c 		if (m.bio)
bio              5732 drivers/block/drbd/drbd_receiver.c 	if (m.bio)
bio                42 drivers/block/drbd/drbd_req.c static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
bio               198 drivers/block/drbd/drbd_req.c 	m->bio->bi_status = errno_to_blk_status(m->error);
bio               199 drivers/block/drbd/drbd_req.c 	bio_endio(m->bio);
bio               290 drivers/block/drbd/drbd_req.c 		m->bio = req->master_bio;
bio               575 drivers/block/drbd/drbd_req.c 		m->bio = NULL;
bio              1159 drivers/block/drbd/drbd_req.c 	struct bio *bio = req->private_bio;
bio              1162 drivers/block/drbd/drbd_req.c 	if (bio_op(bio) != REQ_OP_READ)
bio              1164 drivers/block/drbd/drbd_req.c 	else if (bio->bi_opf & REQ_RAHEAD)
bio              1169 drivers/block/drbd/drbd_req.c 	bio_set_dev(bio, device->ldev->backing_bdev);
bio              1178 drivers/block/drbd/drbd_req.c 			bio_io_error(bio);
bio              1179 drivers/block/drbd/drbd_req.c 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
bio              1181 drivers/block/drbd/drbd_req.c 			    ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
bio              1182 drivers/block/drbd/drbd_req.c 		else if (bio_op(bio) == REQ_OP_DISCARD)
bio              1185 drivers/block/drbd/drbd_req.c 			generic_make_request(bio);
bio              1188 drivers/block/drbd/drbd_req.c 		bio_io_error(bio);
bio              1209 drivers/block/drbd/drbd_req.c drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
bio              1211 drivers/block/drbd/drbd_req.c 	const int rw = bio_data_dir(bio);
bio              1215 drivers/block/drbd/drbd_req.c 	req = drbd_req_new(device, bio);
bio              1221 drivers/block/drbd/drbd_req.c 		bio->bi_status = BLK_STS_RESOURCE;
bio              1222 drivers/block/drbd/drbd_req.c 		bio_endio(bio);
bio              1236 drivers/block/drbd/drbd_req.c 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio              1237 drivers/block/drbd/drbd_req.c 	    bio_op(bio) == REQ_OP_DISCARD)
bio              1434 drivers/block/drbd/drbd_req.c 	if (m.bio)
bio              1438 drivers/block/drbd/drbd_req.c void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
bio              1440 drivers/block/drbd/drbd_req.c 	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
bio              1615 drivers/block/drbd/drbd_req.c blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
bio              1620 drivers/block/drbd/drbd_req.c 	blk_queue_split(q, &bio);
bio              1627 drivers/block/drbd/drbd_req.c 	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
bio              1630 drivers/block/drbd/drbd_req.c 	__drbd_make_request(device, bio, start_jif);
bio               259 drivers/block/drbd/drbd_req.h static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
bio               261 drivers/block/drbd/drbd_req.h 	struct bio *bio;
bio               262 drivers/block/drbd/drbd_req.h 	bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
bio               264 drivers/block/drbd/drbd_req.h 	req->private_bio = bio;
bio               266 drivers/block/drbd/drbd_req.h 	bio->bi_private  = req;
bio               267 drivers/block/drbd/drbd_req.h 	bio->bi_end_io   = drbd_request_endio;
bio               268 drivers/block/drbd/drbd_req.h 	bio->bi_next     = NULL;
bio               275 drivers/block/drbd/drbd_req.h 	struct bio *bio;
bio               305 drivers/block/drbd/drbd_req.h 	if (m.bio)
bio               327 drivers/block/drbd/drbd_req.h 	if (m.bio)
bio                49 drivers/block/drbd/drbd_worker.c void drbd_md_endio(struct bio *bio)
bio                53 drivers/block/drbd/drbd_worker.c 	device = bio->bi_private;
bio                54 drivers/block/drbd/drbd_worker.c 	device->md_io.error = blk_status_to_errno(bio->bi_status);
bio                59 drivers/block/drbd/drbd_worker.c 	bio_put(bio);
bio               170 drivers/block/drbd/drbd_worker.c void drbd_peer_request_endio(struct bio *bio)
bio               172 drivers/block/drbd/drbd_worker.c 	struct drbd_peer_request *peer_req = bio->bi_private;
bio               174 drivers/block/drbd/drbd_worker.c 	bool is_write = bio_data_dir(bio) == WRITE;
bio               175 drivers/block/drbd/drbd_worker.c 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio               176 drivers/block/drbd/drbd_worker.c 			  bio_op(bio) == REQ_OP_DISCARD;
bio               178 drivers/block/drbd/drbd_worker.c 	if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
bio               181 drivers/block/drbd/drbd_worker.c 					: "read", bio->bi_status,
bio               184 drivers/block/drbd/drbd_worker.c 	if (bio->bi_status)
bio               187 drivers/block/drbd/drbd_worker.c 	bio_put(bio); /* no need for the bio anymore */
bio               205 drivers/block/drbd/drbd_worker.c void drbd_request_endio(struct bio *bio)
bio               208 drivers/block/drbd/drbd_worker.c 	struct drbd_request *req = bio->bi_private;
bio               245 drivers/block/drbd/drbd_worker.c 		if (!bio->bi_status)
bio               250 drivers/block/drbd/drbd_worker.c 	if (unlikely(bio->bi_status)) {
bio               251 drivers/block/drbd/drbd_worker.c 		switch (bio_op(bio)) {
bio               254 drivers/block/drbd/drbd_worker.c 			if (bio->bi_status == BLK_STS_NOTSUPP)
bio               260 drivers/block/drbd/drbd_worker.c 			if (bio->bi_opf & REQ_RAHEAD)
bio               273 drivers/block/drbd/drbd_worker.c 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
bio               274 drivers/block/drbd/drbd_worker.c 	bio_put(bio);
bio               282 drivers/block/drbd/drbd_worker.c 	if (m.bio)
bio               315 drivers/block/drbd/drbd_worker.c void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
bio               325 drivers/block/drbd/drbd_worker.c 	bio_for_each_segment(bvec, bio, iter) {
bio               334 drivers/block/drbd/drbd_worker.c 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
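
drbd_csum_bio() above digests a bio's payload segment by segment. The same walk in sketch form, assuming a caller-allocated crypto_shash and ignoring error returns as the original does:

    #include <crypto/hash.h>
    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Error returns from crypto_shash_*() elided, as in the original. */
    static void example_csum_bio(struct crypto_shash *tfm, struct bio *bio,
                                 void *digest)
    {
            SHASH_DESC_ON_STACK(desc, tfm);
            struct bio_vec bvec;
            struct bvec_iter iter;

            desc->tfm = tfm;
            crypto_shash_init(desc);

            bio_for_each_segment(bvec, bio, iter) {
                    u8 *src = kmap_atomic(bvec.bv_page);

                    crypto_shash_update(desc, src + bvec.bv_offset,
                                        bvec.bv_len);
                    kunmap_atomic(src);
            }

            crypto_shash_final(desc, digest);
    }
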
bio              2359 drivers/block/floppy.c 	    raw_cmd->kernel_data == bio_data(current_req->bio)) {
bio              2378 drivers/block/floppy.c 	base = bio_data(current_req->bio);
bio              2648 drivers/block/floppy.c 	} else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
bio              2662 drivers/block/floppy.c 			     ((unsigned long)bio_data(current_req->bio))) >> 9;
bio              2666 drivers/block/floppy.c 		if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
bio              2668 drivers/block/floppy.c 				    ((unsigned long)bio_data(current_req->bio)) %
bio              2685 drivers/block/floppy.c 			raw_cmd->kernel_data = bio_data(current_req->bio);
bio              2739 drivers/block/floppy.c 	    (raw_cmd->kernel_data != bio_data(current_req->bio) &&
bio              2747 drivers/block/floppy.c 		if (raw_cmd->kernel_data != bio_data(current_req->bio))
bio              2764 drivers/block/floppy.c 	if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
bio              4133 drivers/block/floppy.c static void floppy_rb0_cb(struct bio *bio)
bio              4135 drivers/block/floppy.c 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
bio              4138 drivers/block/floppy.c 	if (bio->bi_status) {
bio              4140 drivers/block/floppy.c 			bio->bi_status);
bio              4148 drivers/block/floppy.c 	struct bio bio;
bio              4166 drivers/block/floppy.c 	bio_init(&bio, &bio_vec, 1);
bio              4167 drivers/block/floppy.c 	bio_set_dev(&bio, bdev);
bio              4168 drivers/block/floppy.c 	bio_add_page(&bio, page, size, 0);
bio              4170 drivers/block/floppy.c 	bio.bi_iter.bi_sector = 0;
bio              4171 drivers/block/floppy.c 	bio.bi_flags |= (1 << BIO_QUIET);
bio              4172 drivers/block/floppy.c 	bio.bi_private = &cbdata;
bio              4173 drivers/block/floppy.c 	bio.bi_end_io = floppy_rb0_cb;
bio              4174 drivers/block/floppy.c 	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
bio              4178 drivers/block/floppy.c 	submit_bio(&bio);
bio               358 drivers/block/loop.c 			struct bio *bio;
bio               360 drivers/block/loop.c 			__rq_for_each_bio(bio, rq)
bio               361 drivers/block/loop.c 				zero_fill_bio(bio);
bio               406 drivers/block/loop.c 			struct bio *bio;
bio               408 drivers/block/loop.c 			__rq_for_each_bio(bio, rq)
bio               409 drivers/block/loop.c 				zero_fill_bio(bio);
bio               479 drivers/block/loop.c 			struct bio *bio = rq->bio;
bio               481 drivers/block/loop.c 			while (bio) {
bio               482 drivers/block/loop.c 				zero_fill_bio(bio);
bio               483 drivers/block/loop.c 				bio = bio->bi_next;
bio               520 drivers/block/loop.c 	struct bio *bio = rq->bio;
bio               530 drivers/block/loop.c 	if (rq->bio != rq->biotail) {
bio               556 drivers/block/loop.c 		offset = bio->bi_iter.bi_bvec_done;
bio               557 drivers/block/loop.c 		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
bio              1946 drivers/block/loop.c 	if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
bio              1947 drivers/block/loop.c 		cmd->css = &bio_blkcg(rq->bio)->css;
bio               525 drivers/block/nbd.c 	struct bio *bio;
bio               605 drivers/block/nbd.c 	bio = req->bio;
bio               606 drivers/block/nbd.c 	while (bio) {
bio               607 drivers/block/nbd.c 		struct bio *next = bio->bi_next;
bio               611 drivers/block/nbd.c 		bio_for_each_segment(bvec, bio, iter) {
bio               652 drivers/block/nbd.c 		bio = next;
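The nbd entries above combine both iteration levels: walk the bio chain of a request by bi_next while iterating each bio's segments. A sketch of that shape, with the per-segment work left as a comment (nbd sends each bvec over a socket):

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    static void for_each_request_segment(struct request *req)
    {
            struct bio *bio = req->bio;

            while (bio) {
                    struct bio *next = bio->bi_next;  /* saved before any completion */
                    struct bio_vec bvec;
                    struct bvec_iter iter;

                    bio_for_each_segment(bvec, bio, iter) {
                            /* use bvec.bv_page, bvec.bv_offset, bvec.bv_len */
                    }
                    bio = next;
            }
    }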
bio                21 drivers/block/null_blk.h 	struct bio *bio;
bio               626 drivers/block/null_blk_main.c 		cmd->bio->bi_status = cmd->error;
bio               627 drivers/block/null_blk_main.c 		bio_endio(cmd->bio);
bio              1089 drivers/block/null_blk_main.c 	struct bio *bio = cmd->bio;
bio              1097 drivers/block/null_blk_main.c 	sector = bio->bi_iter.bi_sector;
bio              1099 drivers/block/null_blk_main.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio              1101 drivers/block/null_blk_main.c 			bio_sectors(bio) << SECTOR_SHIFT);
bio              1106 drivers/block/null_blk_main.c 	bio_for_each_segment(bvec, bio, iter) {
bio              1109 drivers/block/null_blk_main.c 				     op_is_write(bio_op(bio)), sector,
bio              1110 drivers/block/null_blk_main.c 				     bio->bi_opf & REQ_FUA);
bio              1284 drivers/block/null_blk_main.c static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
bio              1286 drivers/block/null_blk_main.c 	sector_t sector = bio->bi_iter.bi_sector;
bio              1287 drivers/block/null_blk_main.c 	sector_t nr_sectors = bio_sectors(bio);
bio              1293 drivers/block/null_blk_main.c 	cmd->bio = bio;
bio              1295 drivers/block/null_blk_main.c 	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
bio               808 drivers/block/paride/pcd.c 	pcd_buf = bio_data(pcd_req->bio);
bio               498 drivers/block/paride/pd.c 		pd_buf = bio_data(pd_req->bio);
bio               535 drivers/block/paride/pd.c 		pd_buf = bio_data(pd_req->bio);
bio               856 drivers/block/paride/pf.c 	pf_buf = bio_data(pf_req->bio);
bio               901 drivers/block/paride/pf.c 		pf_buf = bio_data(pf_req->bio);
bio               546 drivers/block/pktcdvd.c 		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
bio               547 drivers/block/pktcdvd.c 		if (!bio)
bio               550 drivers/block/pktcdvd.c 		pkt->r_bios[i] = bio;
bio               557 drivers/block/pktcdvd.c 		struct bio *bio = pkt->r_bios[i];
bio               558 drivers/block/pktcdvd.c 		if (bio)
bio               559 drivers/block/pktcdvd.c 			bio_put(bio);
bio               581 drivers/block/pktcdvd.c 		struct bio *bio = pkt->r_bios[i];
bio               582 drivers/block/pktcdvd.c 		if (bio)
bio               583 drivers/block/pktcdvd.c 			bio_put(bio);
bio               655 drivers/block/pktcdvd.c 		if (s <= tmp->bio->bi_iter.bi_sector)
bio               664 drivers/block/pktcdvd.c 	if (s > tmp->bio->bi_iter.bi_sector) {
bio               669 drivers/block/pktcdvd.c 	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
bio               680 drivers/block/pktcdvd.c 	sector_t s = node->bio->bi_iter.bi_sector;
bio               686 drivers/block/pktcdvd.c 		if (s < tmp->bio->bi_iter.bi_sector)
bio               812 drivers/block/pktcdvd.c static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
bio               815 drivers/block/pktcdvd.c 	if (bio_data_dir(bio) == READ)
bio               816 drivers/block/pktcdvd.c 		bio_list_add(&pd->iosched.read_queue, bio);
bio               818 drivers/block/pktcdvd.c 		bio_list_add(&pd->iosched.write_queue, bio);
bio               849 drivers/block/pktcdvd.c 		struct bio *bio;
bio               863 drivers/block/pktcdvd.c 			bio = bio_list_peek(&pd->iosched.write_queue);
bio               865 drivers/block/pktcdvd.c 			if (bio && (bio->bi_iter.bi_sector ==
bio               888 drivers/block/pktcdvd.c 			bio = bio_list_pop(&pd->iosched.write_queue);
bio               890 drivers/block/pktcdvd.c 			bio = bio_list_pop(&pd->iosched.read_queue);
bio               893 drivers/block/pktcdvd.c 		if (!bio)
bio               896 drivers/block/pktcdvd.c 		if (bio_data_dir(bio) == READ)
bio               898 drivers/block/pktcdvd.c 				bio->bi_iter.bi_size >> 10;
bio               901 drivers/block/pktcdvd.c 			pd->iosched.last_write = bio_end_sector(bio);
bio               916 drivers/block/pktcdvd.c 		generic_make_request(bio);
bio               947 drivers/block/pktcdvd.c static void pkt_end_io_read(struct bio *bio)
bio               949 drivers/block/pktcdvd.c 	struct packet_data *pkt = bio->bi_private;
bio               954 drivers/block/pktcdvd.c 		bio, (unsigned long long)pkt->sector,
bio               955 drivers/block/pktcdvd.c 		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
bio               957 drivers/block/pktcdvd.c 	if (bio->bi_status)
bio               966 drivers/block/pktcdvd.c static void pkt_end_io_packet_write(struct bio *bio)
bio               968 drivers/block/pktcdvd.c 	struct packet_data *pkt = bio->bi_private;
bio               972 drivers/block/pktcdvd.c 	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
bio               988 drivers/block/pktcdvd.c 	struct bio *bio;
bio              1002 drivers/block/pktcdvd.c 	bio_list_for_each(bio, &pkt->orig_bios) {
bio              1003 drivers/block/pktcdvd.c 		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
bio              1005 drivers/block/pktcdvd.c 		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
bio              1029 drivers/block/pktcdvd.c 		bio = pkt->r_bios[f];
bio              1030 drivers/block/pktcdvd.c 		bio_reset(bio);
bio              1031 drivers/block/pktcdvd.c 		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio              1032 drivers/block/pktcdvd.c 		bio_set_dev(bio, pd->bdev);
bio              1033 drivers/block/pktcdvd.c 		bio->bi_end_io = pkt_end_io_read;
bio              1034 drivers/block/pktcdvd.c 		bio->bi_private = pkt;
bio              1040 drivers/block/pktcdvd.c 		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
bio              1044 drivers/block/pktcdvd.c 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio              1045 drivers/block/pktcdvd.c 		pkt_queue_bio(pd, bio);
bio              1125 drivers/block/pktcdvd.c 	bio_reset(pkt->bio);
bio              1126 drivers/block/pktcdvd.c 	bio_set_dev(pkt->bio, pd->bdev);
bio              1127 drivers/block/pktcdvd.c 	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
bio              1128 drivers/block/pktcdvd.c 	pkt->bio->bi_iter.bi_sector = new_sector;
bio              1129 drivers/block/pktcdvd.c 	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
bio              1130 drivers/block/pktcdvd.c 	pkt->bio->bi_vcnt = pkt->frames;
bio              1132 drivers/block/pktcdvd.c 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
bio              1133 drivers/block/pktcdvd.c 	pkt->bio->bi_private = pkt;
bio              1165 drivers/block/pktcdvd.c 	struct bio *bio = NULL;
bio              1190 drivers/block/pktcdvd.c 		bio = node->bio;
bio              1191 drivers/block/pktcdvd.c 		zone = get_zone(bio->bi_iter.bi_sector, pd);
bio              1194 drivers/block/pktcdvd.c 				bio = NULL;
bio              1210 drivers/block/pktcdvd.c 	if (!bio) {
bio              1229 drivers/block/pktcdvd.c 		bio = node->bio;
bio              1231 drivers/block/pktcdvd.c 			get_zone(bio->bi_iter.bi_sector, pd));
bio              1232 drivers/block/pktcdvd.c 		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
bio              1236 drivers/block/pktcdvd.c 		bio_list_add(&pkt->orig_bios, bio);
bio              1237 drivers/block/pktcdvd.c 		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
bio              1310 drivers/block/pktcdvd.c 	struct bio *bio;
bio              1316 drivers/block/pktcdvd.c 	while ((bio = bio_list_pop(&pkt->orig_bios))) {
bio              1317 drivers/block/pktcdvd.c 		bio->bi_status = status;
bio              1318 drivers/block/pktcdvd.c 		bio_endio(bio);
bio              2321 drivers/block/pktcdvd.c static void pkt_end_io_read_cloned(struct bio *bio)
bio              2323 drivers/block/pktcdvd.c 	struct packet_stacked_data *psd = bio->bi_private;
bio              2326 drivers/block/pktcdvd.c 	psd->bio->bi_status = bio->bi_status;
bio              2327 drivers/block/pktcdvd.c 	bio_put(bio);
bio              2328 drivers/block/pktcdvd.c 	bio_endio(psd->bio);
bio              2333 drivers/block/pktcdvd.c static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
bio              2335 drivers/block/pktcdvd.c 	struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
bio              2339 drivers/block/pktcdvd.c 	psd->bio = bio;
bio              2343 drivers/block/pktcdvd.c 	pd->stats.secs_r += bio_sectors(bio);
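The two pktcdvd functions above form the standard stacked-read pattern: clone the bio, remember the original in the clone's bi_private, and complete the original from the clone's end_io. A hedged sketch; my_bio_set and the lower block_device are assumptions standing in for pkt_bio_set and the driver's backing device:

    #include <linux/bio.h>

    static struct bio_set my_bio_set;   /* assumed bioset_init(&my_bio_set,
                                           BIO_POOL_SIZE, 0, 0) at setup */

    static void stacked_read_endio(struct bio *clone)
    {
            struct bio *orig = clone->bi_private;

            orig->bi_status = clone->bi_status;  /* propagate the result */
            bio_put(clone);
            bio_endio(orig);
    }

    static void stacked_read(struct block_device *lower, struct bio *bio)
    {
            struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);

            clone->bi_private = bio;
            clone->bi_end_io = stacked_read_endio;
            bio_set_dev(clone, lower);           /* redirect to the backing device */
            generic_make_request(clone);
    }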
bio              2347 drivers/block/pktcdvd.c static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
bio              2355 drivers/block/pktcdvd.c 	zone = get_zone(bio->bi_iter.bi_sector, pd);
bio              2368 drivers/block/pktcdvd.c 				bio_list_add(&pkt->orig_bios, bio);
bio              2370 drivers/block/pktcdvd.c 					bio->bi_iter.bi_size / CD_FRAMESIZE;
bio              2408 drivers/block/pktcdvd.c 	node->bio = bio;
bio              2431 drivers/block/pktcdvd.c static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
bio              2435 drivers/block/pktcdvd.c 	struct bio *split;
bio              2437 drivers/block/pktcdvd.c 	blk_queue_split(q, &bio);
bio              2441 drivers/block/pktcdvd.c 		pr_err("%s incorrect request queue\n", bio_devname(bio, b));
bio              2446 drivers/block/pktcdvd.c 		(unsigned long long)bio->bi_iter.bi_sector,
bio              2447 drivers/block/pktcdvd.c 		(unsigned long long)bio_end_sector(bio));
bio              2452 drivers/block/pktcdvd.c 	if (bio_data_dir(bio) == READ) {
bio              2453 drivers/block/pktcdvd.c 		pkt_make_request_read(pd, bio);
bio              2459 drivers/block/pktcdvd.c 			   (unsigned long long)bio->bi_iter.bi_sector);
bio              2463 drivers/block/pktcdvd.c 	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
bio              2469 drivers/block/pktcdvd.c 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
bio              2470 drivers/block/pktcdvd.c 		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
bio              2475 drivers/block/pktcdvd.c 			split = bio_split(bio, last_zone -
bio              2476 drivers/block/pktcdvd.c 					  bio->bi_iter.bi_sector,
bio              2478 drivers/block/pktcdvd.c 			bio_chain(split, bio);
bio              2480 drivers/block/pktcdvd.c 			split = bio;
bio              2484 drivers/block/pktcdvd.c 	} while (split != bio);
bio              2488 drivers/block/pktcdvd.c 	bio_io_error(bio);
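The tail of pkt_make_request above is the canonical split-and-chain loop: while a write crosses a packet-zone boundary, bio_split() peels off the leading piece, bio_chain() defers the parent's completion until the piece finishes, and the remainder goes around again. A sketch under the same names (pd, q, get_zone(), pkt_bio_set and pkt_make_request_write() are the driver's own context; GFP_NOIO for the split allocation is an assumption):

    struct bio *split;

    do {
            sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
            sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

            if (last_zone != zone) {
                    /* peel off everything before the final zone */
                    split = bio_split(bio, last_zone - bio->bi_iter.bi_sector,
                                      GFP_NOIO, &pkt_bio_set);
                    bio_chain(split, bio);  /* parent completes after all pieces */
            } else {
                    split = bio;
            }

            pkt_make_request_write(q, split);
    } while (split != bio);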
bio                94 drivers/block/ps3disk.c 			__func__, __LINE__, i, bio_sectors(iter.bio),
bio                95 drivers/block/ps3disk.c 			iter.bio->bi_iter.bi_sector);
bio               537 drivers/block/ps3vram.c static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
bio               538 drivers/block/ps3vram.c 				  struct bio *bio)
bio               541 drivers/block/ps3vram.c 	int write = bio_data_dir(bio) == WRITE;
bio               543 drivers/block/ps3vram.c 	loff_t offset = bio->bi_iter.bi_sector << 9;
bio               547 drivers/block/ps3vram.c 	struct bio *next;
bio               549 drivers/block/ps3vram.c 	bio_for_each_segment(bvec, bio, iter) {
bio               583 drivers/block/ps3vram.c 	bio->bi_status = error;
bio               584 drivers/block/ps3vram.c 	bio_endio(bio);
bio               588 drivers/block/ps3vram.c static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
bio               596 drivers/block/ps3vram.c 	blk_queue_split(q, &bio);
bio               600 drivers/block/ps3vram.c 	bio_list_add(&priv->list, bio);
bio               607 drivers/block/ps3vram.c 		bio = ps3vram_do_bio(dev, bio);
bio               608 drivers/block/ps3vram.c 	} while (bio);
bio              2806 drivers/block/rbd.c 				 u64 off, u64 len, struct bio *bio)
bio              2809 drivers/block/rbd.c 	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
bio              4892 drivers/block/rbd.c 					       rq->bio);
bio                45 drivers/block/rsxx/dev.c 	struct bio	*bio;
bio                99 drivers/block/rsxx/dev.c static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
bio               101 drivers/block/rsxx/dev.c 	generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
bio               106 drivers/block/rsxx/dev.c 				struct bio *bio,
bio               109 drivers/block/rsxx/dev.c 	generic_end_io_acct(card->queue, bio_op(bio),
bio               124 drivers/block/rsxx/dev.c 			disk_stats_complete(card, meta->bio, meta->start_time);
bio               127 drivers/block/rsxx/dev.c 			bio_io_error(meta->bio);
bio               129 drivers/block/rsxx/dev.c 			bio_endio(meta->bio);
bio               134 drivers/block/rsxx/dev.c static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
bio               140 drivers/block/rsxx/dev.c 	blk_queue_split(q, &bio);
bio               147 drivers/block/rsxx/dev.c 	if (bio_end_sector(bio) > get_capacity(card->gendisk))
bio               156 drivers/block/rsxx/dev.c 	if (bio->bi_iter.bi_size == 0) {
bio               167 drivers/block/rsxx/dev.c 	bio_meta->bio = bio;
bio               173 drivers/block/rsxx/dev.c 		disk_stats_start(card, bio);
bio               176 drivers/block/rsxx/dev.c 		 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
bio               177 drivers/block/rsxx/dev.c 		 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
bio               179 drivers/block/rsxx/dev.c 	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
bio               190 drivers/block/rsxx/dev.c 		bio->bi_status = st;
bio               191 drivers/block/rsxx/dev.c 	bio_endio(bio);
bio               667 drivers/block/rsxx/dma.c 			   struct bio *bio,
bio               686 drivers/block/rsxx/dma.c 	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
bio               694 drivers/block/rsxx/dma.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio               695 drivers/block/rsxx/dma.c 		bv_len = bio->bi_iter.bi_size;
bio               712 drivers/block/rsxx/dma.c 		bio_for_each_segment(bvec, bio, iter) {
bio               724 drivers/block/rsxx/dma.c 							bio_data_dir(bio),
bio               381 drivers/block/rsxx/rsxx_priv.h 			   struct bio *bio,
bio               522 drivers/block/skd_main.c 	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
bio               545 drivers/block/swim.c 					  bio_data(req->bio));
bio               449 drivers/block/swim3.c 		init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
bio               453 drivers/block/swim3.c 		init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
bio               107 drivers/block/umem.c 	struct bio	*bio, *currentbio, **biotail;
bio               116 drivers/block/umem.c 		struct bio		*bio, **biotail;
bio               327 drivers/block/umem.c 	page->bio = NULL;
bio               328 drivers/block/umem.c 	page->biotail = &page->bio;
bio               342 drivers/block/umem.c 	struct bio *bio;
bio               345 drivers/block/umem.c 	bio = card->currentbio;
bio               346 drivers/block/umem.c 	if (!bio && card->bio) {
bio               347 drivers/block/umem.c 		card->currentbio = card->bio;
bio               348 drivers/block/umem.c 		card->current_iter = card->bio->bi_iter;
bio               349 drivers/block/umem.c 		card->bio = card->bio->bi_next;
bio               350 drivers/block/umem.c 		if (card->bio == NULL)
bio               351 drivers/block/umem.c 			card->biotail = &card->bio;
bio               355 drivers/block/umem.c 	if (!bio)
bio               361 drivers/block/umem.c 	vec = bio_iter_iovec(bio, card->current_iter);
bio               367 drivers/block/umem.c 				  bio_op(bio) == REQ_OP_READ ?
bio               373 drivers/block/umem.c 	if (p->bio == NULL)
bio               375 drivers/block/umem.c 	if ((p->biotail) != &bio->bi_next) {
bio               376 drivers/block/umem.c 		*(p->biotail) = bio;
bio               377 drivers/block/umem.c 		p->biotail = &(bio->bi_next);
bio               378 drivers/block/umem.c 		bio->bi_next = NULL;
bio               396 drivers/block/umem.c 	if (bio_op(bio) == REQ_OP_WRITE)
bio               401 drivers/block/umem.c 	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
bio               417 drivers/block/umem.c 	struct bio *return_bio = NULL;
bio               427 drivers/block/umem.c 		struct bio *bio = page->bio;
bio               439 drivers/block/umem.c 		vec = bio_iter_iovec(bio, page->iter);
bio               440 drivers/block/umem.c 		bio_advance_iter(bio, &page->iter, vec.bv_len);
bio               443 drivers/block/umem.c 			page->bio = bio->bi_next;
bio               444 drivers/block/umem.c 			if (page->bio)
bio               445 drivers/block/umem.c 				page->iter = page->bio->bi_iter;
bio               454 drivers/block/umem.c 			bio->bi_status = BLK_STS_IOERR;
bio               460 drivers/block/umem.c 		} else if (op_is_write(bio_op(bio)) &&
bio               470 drivers/block/umem.c 		if (bio != page->bio) {
bio               471 drivers/block/umem.c 			bio->bi_next = return_bio;
bio               472 drivers/block/umem.c 			return_bio = bio;
bio               499 drivers/block/umem.c 		struct bio *bio = return_bio;
bio               501 drivers/block/umem.c 		return_bio = bio->bi_next;
bio               502 drivers/block/umem.c 		bio->bi_next = NULL;
bio               503 drivers/block/umem.c 		bio_endio(bio);
bio               522 drivers/block/umem.c static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
bio               526 drivers/block/umem.c 		 (unsigned long long)bio->bi_iter.bi_sector,
bio               527 drivers/block/umem.c 		 bio->bi_iter.bi_size);
bio               529 drivers/block/umem.c 	blk_queue_split(q, &bio);
bio               532 drivers/block/umem.c 	*card->biotail = bio;
bio               533 drivers/block/umem.c 	bio->bi_next = NULL;
bio               534 drivers/block/umem.c 	card->biotail = &bio->bi_next;
bio               535 drivers/block/umem.c 	if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
bio               884 drivers/block/umem.c 	card->bio = NULL;
bio               885 drivers/block/umem.c 	card->biotail = &card->bio;
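The umem entries above implement a singly linked bio queue through a struct bio **biotail, so enqueue is O(1) and an empty list is re-armed by pointing biotail back at the head slot. A self-contained sketch of that data structure:

    #include <linux/bio.h>

    struct bio_queue {
            struct bio *bio, **biotail;
    };

    static void bioq_init(struct bio_queue *q)
    {
            q->bio = NULL;
            q->biotail = &q->bio;           /* tail points at the head slot */
    }

    static void bioq_add(struct bio_queue *q, struct bio *bio)
    {
            bio->bi_next = NULL;
            *q->biotail = bio;              /* link after the current tail */
            q->biotail = &bio->bi_next;
    }

    static struct bio *bioq_pop(struct bio_queue *q)
    {
            struct bio *bio = q->bio;

            if (bio) {
                    q->bio = bio->bi_next;
                    if (!q->bio)
                            q->biotail = &q->bio;  /* list drained: re-arm */
                    bio->bi_next = NULL;
            }
            return bio;
    }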
bio               198 drivers/block/virtio_blk.c 	struct bio *bio;
bio               208 drivers/block/virtio_blk.c 	__rq_for_each_bio(bio, req) {
bio               209 drivers/block/virtio_blk.c 		u64 sector = bio->bi_iter.bi_sector;
bio               210 drivers/block/virtio_blk.c 		u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
bio              1110 drivers/block/xen-blkback/blkback.c static void end_block_io_op(struct bio *bio)
bio              1112 drivers/block/xen-blkback/blkback.c 	__end_block_io_op(bio->bi_private, bio->bi_status);
bio              1113 drivers/block/xen-blkback/blkback.c 	bio_put(bio);
bio              1231 drivers/block/xen-blkback/blkback.c 	struct bio *bio = NULL;
bio              1232 drivers/block/xen-blkback/blkback.c 	struct bio **biolist = pending_req->biolist;
bio              1363 drivers/block/xen-blkback/blkback.c 		while ((bio == NULL) ||
bio              1364 drivers/block/xen-blkback/blkback.c 		       (bio_add_page(bio,
bio              1370 drivers/block/xen-blkback/blkback.c 			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
bio              1371 drivers/block/xen-blkback/blkback.c 			if (unlikely(bio == NULL))
bio              1374 drivers/block/xen-blkback/blkback.c 			biolist[nbio++] = bio;
bio              1375 drivers/block/xen-blkback/blkback.c 			bio_set_dev(bio, preq.bdev);
bio              1376 drivers/block/xen-blkback/blkback.c 			bio->bi_private = pending_req;
bio              1377 drivers/block/xen-blkback/blkback.c 			bio->bi_end_io  = end_block_io_op;
bio              1378 drivers/block/xen-blkback/blkback.c 			bio->bi_iter.bi_sector  = preq.sector_number;
bio              1379 drivers/block/xen-blkback/blkback.c 			bio_set_op_attrs(bio, operation, operation_flags);
bio              1386 drivers/block/xen-blkback/blkback.c 	if (!bio) {
bio              1389 drivers/block/xen-blkback/blkback.c 		bio = bio_alloc(GFP_KERNEL, 0);
bio              1390 drivers/block/xen-blkback/blkback.c 		if (unlikely(bio == NULL))
bio              1393 drivers/block/xen-blkback/blkback.c 		biolist[nbio++] = bio;
bio              1394 drivers/block/xen-blkback/blkback.c 		bio_set_dev(bio, preq.bdev);
bio              1395 drivers/block/xen-blkback/blkback.c 		bio->bi_private = pending_req;
bio              1396 drivers/block/xen-blkback/blkback.c 		bio->bi_end_io  = end_block_io_op;
bio              1397 drivers/block/xen-blkback/blkback.c 		bio_set_op_attrs(bio, operation, operation_flags);
bio               354 drivers/block/xen-blkback/common.h 	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
bio              2021 drivers/block/xen-blkfront.c 	struct bio *bio;
bio              2059 drivers/block/xen-blkfront.c 	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
bio              2061 drivers/block/xen-blkfront.c 		submit_bio(bio);
bio              2110 drivers/block/xen-blkfront.c 			merge_bio.head = shadow[j].request->bio;
bio              2113 drivers/block/xen-blkfront.c 			shadow[j].request->bio = NULL;
bio               670 drivers/block/xsysace.c 		ace->data_ptr = bio_data(req->bio);
bio               743 drivers/block/xsysace.c 			ace->data_ptr = bio_data(ace->req->bio);
bio                91 drivers/block/z2ram.c 		void *buffer = bio_data(req->bio);
bio                56 drivers/block/zram/zram_drv.c 				u32 index, int offset, struct bio *bio);
bio               577 drivers/block/zram/zram_drv.c static void zram_page_end_io(struct bio *bio)
bio               579 drivers/block/zram/zram_drv.c 	struct page *page = bio_first_page_all(bio);
bio               581 drivers/block/zram/zram_drv.c 	page_endio(page, op_is_write(bio_op(bio)),
bio               582 drivers/block/zram/zram_drv.c 			blk_status_to_errno(bio->bi_status));
bio               583 drivers/block/zram/zram_drv.c 	bio_put(bio);
bio               590 drivers/block/zram/zram_drv.c 			unsigned long entry, struct bio *parent)
bio               592 drivers/block/zram/zram_drv.c 	struct bio *bio;
bio               594 drivers/block/zram/zram_drv.c 	bio = bio_alloc(GFP_ATOMIC, 1);
bio               595 drivers/block/zram/zram_drv.c 	if (!bio)
bio               598 drivers/block/zram/zram_drv.c 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
bio               599 drivers/block/zram/zram_drv.c 	bio_set_dev(bio, zram->bdev);
bio               600 drivers/block/zram/zram_drv.c 	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bio               601 drivers/block/zram/zram_drv.c 		bio_put(bio);
bio               606 drivers/block/zram/zram_drv.c 		bio->bi_opf = REQ_OP_READ;
bio               607 drivers/block/zram/zram_drv.c 		bio->bi_end_io = zram_page_end_io;
bio               609 drivers/block/zram/zram_drv.c 		bio->bi_opf = parent->bi_opf;
bio               610 drivers/block/zram/zram_drv.c 		bio_chain(bio, parent);
bio               613 drivers/block/zram/zram_drv.c 	submit_bio(bio);
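read_from_bdev_async() above leans on bio_chain(): the child bio needs no explicit bi_end_io because chaining installs one that drops the parent's remaining reference (and propagates any error into the parent's bi_status) when the child completes. A sketch of the same shape, assuming a single-page payload:

    #include <linux/bio.h>

    static void submit_chained_read(struct block_device *bdev, struct page *page,
                                    sector_t sector, struct bio *parent)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);  /* GFP_NOIO from fs_bio_set
                                                           does not fail */

            bio->bi_iter.bi_sector = sector;
            bio_set_dev(bio, bdev);
            bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_opf = parent->bi_opf;
            bio_chain(bio, parent);         /* parent completes after the child */
            submit_bio(bio);
    }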
bio               626 drivers/block/zram/zram_drv.c 	struct bio bio;
bio               711 drivers/block/zram/zram_drv.c 		bio_init(&bio, &bio_vec, 1);
bio               712 drivers/block/zram/zram_drv.c 		bio_set_dev(&bio, zram->bdev);
bio               713 drivers/block/zram/zram_drv.c 		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
bio               714 drivers/block/zram/zram_drv.c 		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio               716 drivers/block/zram/zram_drv.c 		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
bio               722 drivers/block/zram/zram_drv.c 		ret = submit_bio_wait(&bio);
bio               776 drivers/block/zram/zram_drv.c 	struct bio *bio;
bio               786 drivers/block/zram/zram_drv.c 	struct bio *bio = zw->bio;
bio               788 drivers/block/zram/zram_drv.c 	read_from_bdev_async(zram, &zw->bvec, entry, bio);
bio               797 drivers/block/zram/zram_drv.c 				unsigned long entry, struct bio *bio)
bio               804 drivers/block/zram/zram_drv.c 	work.bio = bio;
bio               815 drivers/block/zram/zram_drv.c 				unsigned long entry, struct bio *bio)
bio               823 drivers/block/zram/zram_drv.c 			unsigned long entry, struct bio *parent, bool sync)
bio               834 drivers/block/zram/zram_drv.c 			unsigned long entry, struct bio *parent, bool sync)
bio              1213 drivers/block/zram/zram_drv.c 				struct bio *bio, bool partial_io)
bio              1231 drivers/block/zram/zram_drv.c 				bio, partial_io);
bio              1274 drivers/block/zram/zram_drv.c 				u32 index, int offset, struct bio *bio)
bio              1287 drivers/block/zram/zram_drv.c 	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
bio              1307 drivers/block/zram/zram_drv.c 				u32 index, struct bio *bio)
bio              1423 drivers/block/zram/zram_drv.c 				u32 index, int offset, struct bio *bio)
bio              1441 drivers/block/zram/zram_drv.c 		ret = __zram_bvec_read(zram, page, index, bio, true);
bio              1456 drivers/block/zram/zram_drv.c 	ret = __zram_bvec_write(zram, &vec, index, bio);
bio              1469 drivers/block/zram/zram_drv.c 			     int offset, struct bio *bio)
bio              1471 drivers/block/zram/zram_drv.c 	size_t n = bio->bi_iter.bi_size;
bio              1507 drivers/block/zram/zram_drv.c 			int offset, unsigned int op, struct bio *bio)
bio              1518 drivers/block/zram/zram_drv.c 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
bio              1522 drivers/block/zram/zram_drv.c 		ret = zram_bvec_write(zram, bvec, index, offset, bio);
bio              1541 drivers/block/zram/zram_drv.c static void __zram_make_request(struct zram *zram, struct bio *bio)
bio              1548 drivers/block/zram/zram_drv.c 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
bio              1549 drivers/block/zram/zram_drv.c 	offset = (bio->bi_iter.bi_sector &
bio              1552 drivers/block/zram/zram_drv.c 	switch (bio_op(bio)) {
bio              1555 drivers/block/zram/zram_drv.c 		zram_bio_discard(zram, index, offset, bio);
bio              1556 drivers/block/zram/zram_drv.c 		bio_endio(bio);
bio              1562 drivers/block/zram/zram_drv.c 	bio_for_each_segment(bvec, bio, iter) {
bio              1570 drivers/block/zram/zram_drv.c 					 bio_op(bio), bio) < 0)
bio              1580 drivers/block/zram/zram_drv.c 	bio_endio(bio);
bio              1584 drivers/block/zram/zram_drv.c 	bio_io_error(bio);
bio              1590 drivers/block/zram/zram_drv.c static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
bio              1594 drivers/block/zram/zram_drv.c 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio              1595 drivers/block/zram/zram_drv.c 					bio->bi_iter.bi_size)) {
bio              1600 drivers/block/zram/zram_drv.c 	__zram_make_request(zram, bio);
bio              1604 drivers/block/zram/zram_drv.c 	bio_io_error(bio);
bio              2179 drivers/cdrom/cdrom.c 	struct bio *bio;
bio              2229 drivers/cdrom/cdrom.c 		bio = rq->bio;
bio              2241 drivers/cdrom/cdrom.c 		if (blk_rq_unmap_user(bio))
bio               583 drivers/cdrom/gdrom.c 	__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
bio               191 drivers/ide/ide-cd.c 			bio_sectors = max(bio_sectors(failed_command->bio), 4U);
bio               220 drivers/ide/ide-cd.c 	void *sense = bio_data(rq->bio);
bio               779 drivers/ide/ide-cd.c 		if (uptodate == 0 && rq->bio)
bio               854 drivers/ide/ide-cd.c 	if (rq->bio) {
bio               856 drivers/ide/ide-cd.c 		char *buf = bio_data(rq->bio);
bio                80 drivers/ide/ide-floppy.c 		u8 *buf = bio_data(rq->bio);
bio               273 drivers/ide/ide-tape.c 	u8 *sense = bio_data(rq->bio);
bio               832 drivers/lightnvm/core.c 	struct bio bio;
bio               841 drivers/lightnvm/core.c 	bio_init(&bio, &bio_vec, 1);
bio               842 drivers/lightnvm/core.c 	bio_add_page(&bio, page, PAGE_SIZE, 0);
bio               843 drivers/lightnvm/core.c 	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
bio               845 drivers/lightnvm/core.c 	rqd.bio = &bio;
bio                21 drivers/lightnvm/pblk-cache.c void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
bio                26 drivers/lightnvm/pblk-cache.c 	sector_t lba = pblk_get_lba(bio);
bio                29 drivers/lightnvm/pblk-cache.c 	int nr_entries = pblk_get_secs(bio);
bio                32 drivers/lightnvm/pblk-cache.c 	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
bio                40 drivers/lightnvm/pblk-cache.c 	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
bio                47 drivers/lightnvm/pblk-cache.c 		bio_io_error(bio);
bio                53 drivers/lightnvm/pblk-cache.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio                58 drivers/lightnvm/pblk-cache.c 	if (unlikely(!bio_has_data(bio)))
bio                62 drivers/lightnvm/pblk-cache.c 		void *data = bio_data(bio);
bio                69 drivers/lightnvm/pblk-cache.c 		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
bio                86 drivers/lightnvm/pblk-cache.c 		bio_endio(bio);
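The pblk-cache.c loop above consumes a write bio in 4 KiB steps: bio_data() yields the kernel address of the bio's current front (valid here because pblk payloads are not highmem), and bio_advance() moves the iterator past the copied chunk. A fragment-level sketch; copy_one_page() is a hypothetical stand-in for the ring-buffer write the driver performs:

    for (i = 0; i < nr_entries; i++) {
            void *data = bio_data(bio);             /* current 4 KiB chunk */

            copy_one_page(rwb, data);               /* hypothetical helper */
            bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
    }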
bio               323 drivers/lightnvm/pblk-core.c void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
bio               330 drivers/lightnvm/pblk-core.c 	for (i = 0; i < bio->bi_vcnt; i++) {
bio               331 drivers/lightnvm/pblk-core.c 		bv = &bio->bi_io_vec[i];
bio               339 drivers/lightnvm/pblk-core.c int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
bio               349 drivers/lightnvm/pblk-core.c 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
bio               359 drivers/lightnvm/pblk-core.c 	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
bio               465 drivers/lightnvm/pblk-core.c void pblk_discard(struct pblk *pblk, struct bio *bio)
bio               467 drivers/lightnvm/pblk-core.c 	sector_t slba = pblk_get_lba(bio);
bio               468 drivers/lightnvm/pblk-core.c 	sector_t nr_secs = pblk_get_secs(bio);
bio               862 drivers/lightnvm/pblk-core.c 	rqd->bio = NULL;
bio              2127 drivers/lightnvm/pblk-core.c 			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
bio              2142 drivers/lightnvm/pblk-core.c 	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
bio                50 drivers/lightnvm/pblk-init.c static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
bio                54 drivers/lightnvm/pblk-init.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio                55 drivers/lightnvm/pblk-init.c 		pblk_discard(pblk, bio);
bio                56 drivers/lightnvm/pblk-init.c 		if (!(bio->bi_opf & REQ_PREFLUSH)) {
bio                57 drivers/lightnvm/pblk-init.c 			bio_endio(bio);
bio                65 drivers/lightnvm/pblk-init.c 	if (bio_data_dir(bio) == READ) {
bio                66 drivers/lightnvm/pblk-init.c 		blk_queue_split(q, &bio);
bio                67 drivers/lightnvm/pblk-init.c 		pblk_submit_read(pblk, bio);
bio                73 drivers/lightnvm/pblk-init.c 		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
bio                74 drivers/lightnvm/pblk-init.c 			blk_queue_split(q, &bio);
bio                76 drivers/lightnvm/pblk-init.c 		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
bio               386 drivers/lightnvm/pblk-rb.c static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
bio               410 drivers/lightnvm/pblk-rb.c 	if (bio)
bio               411 drivers/lightnvm/pblk-rb.c 		bio_list_add(&entry->w_ctx.bios, bio);
bio               415 drivers/lightnvm/pblk-rb.c 	return bio ? 1 : 0;
bio               464 drivers/lightnvm/pblk-rb.c 				   unsigned int *pos, struct bio *bio,
bio               475 drivers/lightnvm/pblk-rb.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio               479 drivers/lightnvm/pblk-rb.c 		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
bio               494 drivers/lightnvm/pblk-rb.c int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
bio               507 drivers/lightnvm/pblk-rb.c 	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
bio               557 drivers/lightnvm/pblk-rb.c 	struct bio *bio = rqd->bio;
bio               599 drivers/lightnvm/pblk-rb.c 		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
bio               619 drivers/lightnvm/pblk-rb.c 		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
bio               644 drivers/lightnvm/pblk-rb.c int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
bio               676 drivers/lightnvm/pblk-rb.c 	data = bio_data(bio);
bio                28 drivers/lightnvm/pblk-read.c static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
bio                37 drivers/lightnvm/pblk-read.c 	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
bio                41 drivers/lightnvm/pblk-read.c 				 struct bio *bio, sector_t blba,
bio                68 drivers/lightnvm/pblk-read.c 			if (!pblk_read_from_cache(pblk, bio, lba,
bio                94 drivers/lightnvm/pblk-read.c 		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
bio               179 drivers/lightnvm/pblk-read.c static void pblk_end_user_read(struct bio *bio, int error)
bio               182 drivers/lightnvm/pblk-read.c 		bio_io_error(bio);
bio               184 drivers/lightnvm/pblk-read.c 		bio_endio(bio);
bio               192 drivers/lightnvm/pblk-read.c 	struct bio *int_bio = rqd->bio;
bio               219 drivers/lightnvm/pblk-read.c 	struct bio *bio = (struct bio *)r_ctx->private;
bio               221 drivers/lightnvm/pblk-read.c 	pblk_end_user_read(bio, rqd->error);
bio               225 drivers/lightnvm/pblk-read.c static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
bio               249 drivers/lightnvm/pblk-read.c 		if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
bio               264 drivers/lightnvm/pblk-read.c void pblk_submit_read(struct pblk *pblk, struct bio *bio)
bio               268 drivers/lightnvm/pblk-read.c 	sector_t blba = pblk_get_lba(bio);
bio               269 drivers/lightnvm/pblk-read.c 	unsigned int nr_secs = pblk_get_secs(bio);
bio               273 drivers/lightnvm/pblk-read.c 	struct bio *int_bio, *split_bio;
bio               275 drivers/lightnvm/pblk-read.c 	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
bio               290 drivers/lightnvm/pblk-read.c 		bio_io_error(bio);
bio               299 drivers/lightnvm/pblk-read.c 	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
bio               308 drivers/lightnvm/pblk-read.c 	r_ctx->private = bio; /* original bio */
bio               309 drivers/lightnvm/pblk-read.c 	rqd->bio = int_bio; /* internal bio */
bio               313 drivers/lightnvm/pblk-read.c 		pblk_end_user_read(bio, 0);
bio               323 drivers/lightnvm/pblk-read.c 		split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
bio               325 drivers/lightnvm/pblk-read.c 		bio_chain(split_bio, bio);
bio               326 drivers/lightnvm/pblk-read.c 		generic_make_request(bio);
bio               334 drivers/lightnvm/pblk-read.c 		bio = split_bio;
bio               343 drivers/lightnvm/pblk-read.c 		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
bio               222 drivers/lightnvm/pblk-recovery.c 	rqd->bio = NULL;
bio               393 drivers/lightnvm/pblk-recovery.c 	rqd->bio = NULL;
bio                25 drivers/lightnvm/pblk-write.c 	struct bio *original_bio;
bio                53 drivers/lightnvm/pblk-write.c 		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
bio                62 drivers/lightnvm/pblk-write.c 	bio_put(rqd->bio);
bio               224 drivers/lightnvm/pblk-write.c 		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
bio               226 drivers/lightnvm/pblk-write.c 	bio_put(rqd->bio);
bio               264 drivers/lightnvm/pblk-write.c 		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
bio               548 drivers/lightnvm/pblk-write.c 	struct bio *bio = rqd->bio;
bio               551 drivers/lightnvm/pblk-write.c 		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
bio               557 drivers/lightnvm/pblk-write.c 	struct bio *bio;
bio               614 drivers/lightnvm/pblk-write.c 	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
bio               616 drivers/lightnvm/pblk-write.c 	bio->bi_iter.bi_sector = 0; /* internal bio */
bio               617 drivers/lightnvm/pblk-write.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               620 drivers/lightnvm/pblk-write.c 	rqd->bio = bio;
bio               641 drivers/lightnvm/pblk-write.c 	bio_put(bio);
bio               725 drivers/lightnvm/pblk.h int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
bio               741 drivers/lightnvm/pblk.h int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
bio               771 drivers/lightnvm/pblk.h void pblk_discard(struct pblk *pblk, struct bio *bio);
bio               822 drivers/lightnvm/pblk.h int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
bio               824 drivers/lightnvm/pblk.h void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
bio               846 drivers/lightnvm/pblk.h void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
bio               872 drivers/lightnvm/pblk.h void pblk_submit_read(struct pblk *pblk, struct bio *bio);
bio              1308 drivers/lightnvm/pblk.h static inline unsigned int pblk_get_bi_idx(struct bio *bio)
bio              1310 drivers/lightnvm/pblk.h 	return bio->bi_iter.bi_idx;
bio              1313 drivers/lightnvm/pblk.h static inline sector_t pblk_get_lba(struct bio *bio)
bio              1315 drivers/lightnvm/pblk.h 	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
bio              1318 drivers/lightnvm/pblk.h static inline unsigned int pblk_get_secs(struct bio *bio)
bio              1320 drivers/lightnvm/pblk.h 	return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
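Worked example for the helpers above, assuming 512-byte sectors and 4 KiB exposed pages (NR_PHY_IN_LOG = 4096 / 512 = 8): a bio with bi_sector = 24 and bi_size = 16384 yields pblk_get_lba() = 24 / 8 = 3 and pblk_get_secs() = 16384 / 4096 = 4, i.e. four logical 4 KiB entries starting at lba 3.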
bio               277 drivers/md/bcache/bcache.h 			  struct bio *bio, unsigned int sectors);
bio               304 drivers/md/bcache/bcache.h 	struct bio		sb_bio;
bio               409 drivers/md/bcache/bcache.h 	struct bio		sb_bio;
bio               744 drivers/md/bcache/bcache.h 	struct bio		bio;
bio               919 drivers/md/bcache/bcache.h 				      struct bio *bio,
bio               924 drivers/md/bcache/bcache.h 		bio->bi_status = BLK_STS_IOERR;
bio               925 drivers/md/bcache/bcache.h 		bio_endio(bio);
bio               928 drivers/md/bcache/bcache.h 	generic_make_request(bio);
bio               947 drivers/md/bcache/bcache.h void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
bio               950 drivers/md/bcache/bcache.h void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
bio               952 drivers/md/bcache/bcache.h void bch_bbio_endio(struct cache_set *c, struct bio *bio,
bio               954 drivers/md/bcache/bcache.h void bch_bbio_free(struct bio *bio, struct cache_set *c);
bio               955 drivers/md/bcache/bcache.h struct bio *bch_bbio_alloc(struct cache_set *c);
bio               957 drivers/md/bcache/bcache.h void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
bio               958 drivers/md/bcache/bcache.h void bch_submit_bbio(struct bio *bio, struct cache_set *c,
bio               292 drivers/md/bcache/btree.c static void btree_node_read_endio(struct bio *bio)
bio               294 drivers/md/bcache/btree.c 	struct closure *cl = bio->bi_private;
bio               303 drivers/md/bcache/btree.c 	struct bio *bio;
bio               309 drivers/md/bcache/btree.c 	bio = bch_bbio_alloc(b->c);
bio               310 drivers/md/bcache/btree.c 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio               311 drivers/md/bcache/btree.c 	bio->bi_end_io	= btree_node_read_endio;
bio               312 drivers/md/bcache/btree.c 	bio->bi_private	= &cl;
bio               313 drivers/md/bcache/btree.c 	bio->bi_opf = REQ_OP_READ | REQ_META;
bio               315 drivers/md/bcache/btree.c 	bch_bio_map(bio, b->keys.set[0].data);
bio               317 drivers/md/bcache/btree.c 	bch_submit_bbio(bio, b->c, &b->key, 0);
bio               320 drivers/md/bcache/btree.c 	if (bio->bi_status)
bio               323 drivers/md/bcache/btree.c 	bch_bbio_free(bio, b->c);
bio               364 drivers/md/bcache/btree.c 	bch_bbio_free(b->bio, b->c);
bio               365 drivers/md/bcache/btree.c 	b->bio = NULL;
bio               378 drivers/md/bcache/btree.c 	bio_free_pages(b->bio);
bio               382 drivers/md/bcache/btree.c static void btree_node_write_endio(struct bio *bio)
bio               384 drivers/md/bcache/btree.c 	struct closure *cl = bio->bi_private;
bio               387 drivers/md/bcache/btree.c 	if (bio->bi_status)
bio               390 drivers/md/bcache/btree.c 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
bio               403 drivers/md/bcache/btree.c 	BUG_ON(b->bio);
bio               404 drivers/md/bcache/btree.c 	b->bio = bch_bbio_alloc(b->c);
bio               406 drivers/md/bcache/btree.c 	b->bio->bi_end_io	= btree_node_write_endio;
bio               407 drivers/md/bcache/btree.c 	b->bio->bi_private	= cl;
bio               408 drivers/md/bcache/btree.c 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
bio               409 drivers/md/bcache/btree.c 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
bio               410 drivers/md/bcache/btree.c 	bch_bio_map(b->bio, i);
bio               431 drivers/md/bcache/btree.c 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
bio               436 drivers/md/bcache/btree.c 		bio_for_each_segment_all(bv, b->bio, iter_all) {
bio               441 drivers/md/bcache/btree.c 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
bio               449 drivers/md/bcache/btree.c 		b->bio->bi_vcnt = 0;
bio               450 drivers/md/bcache/btree.c 		bch_bio_map(b->bio, i);
bio               452 drivers/md/bcache/btree.c 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
bio               147 drivers/md/bcache/btree.h 	struct bio		*bio;
bio                35 drivers/md/bcache/debug.c 	struct bio *bio;
bio                52 drivers/md/bcache/debug.c 	bio = bch_bbio_alloc(b->c);
bio                53 drivers/md/bcache/debug.c 	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
bio                54 drivers/md/bcache/debug.c 	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
bio                55 drivers/md/bcache/debug.c 	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
bio                56 drivers/md/bcache/debug.c 	bio->bi_opf		= REQ_OP_READ | REQ_META;
bio                57 drivers/md/bcache/debug.c 	bch_bio_map(bio, sorted);
bio                59 drivers/md/bcache/debug.c 	submit_bio_wait(bio);
bio                60 drivers/md/bcache/debug.c 	bch_bbio_free(bio, b->c);
bio               108 drivers/md/bcache/debug.c void bch_data_verify(struct cached_dev *dc, struct bio *bio)
bio               110 drivers/md/bcache/debug.c 	struct bio *check;
bio               114 drivers/md/bcache/debug.c 	check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
bio               117 drivers/md/bcache/debug.c 	check->bi_disk = bio->bi_disk;
bio               119 drivers/md/bcache/debug.c 	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio               120 drivers/md/bcache/debug.c 	check->bi_iter.bi_size = bio->bi_iter.bi_size;
bio               129 drivers/md/bcache/debug.c 	bio_for_each_segment(bv, bio, iter) {
bio               142 drivers/md/bcache/debug.c 				 (uint64_t) bio->bi_iter.bi_sector);
bio                 5 drivers/md/bcache/debug.h struct bio;
bio                12 drivers/md/bcache/debug.h void bch_data_verify(struct cached_dev *dc, struct bio *bio);
bio                21 drivers/md/bcache/debug.h static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
bio                17 drivers/md/bcache/io.c void bch_bbio_free(struct bio *bio, struct cache_set *c)
bio                19 drivers/md/bcache/io.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio                24 drivers/md/bcache/io.c struct bio *bch_bbio_alloc(struct cache_set *c)
bio                27 drivers/md/bcache/io.c 	struct bio *bio = &b->bio;
bio                29 drivers/md/bcache/io.c 	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
bio                31 drivers/md/bcache/io.c 	return bio;
bio                34 drivers/md/bcache/io.c void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
bio                36 drivers/md/bcache/io.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio                38 drivers/md/bcache/io.c 	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
bio                39 drivers/md/bcache/io.c 	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
bio                42 drivers/md/bcache/io.c 	closure_bio_submit(c, bio, bio->bi_private);
bio                45 drivers/md/bcache/io.c void bch_submit_bbio(struct bio *bio, struct cache_set *c,
bio                48 drivers/md/bcache/io.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio                51 drivers/md/bcache/io.c 	__bch_submit_bbio(bio, c);
bio                55 drivers/md/bcache/io.c void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
bio                67 drivers/md/bcache/io.c 	if (bio->bi_opf & REQ_RAHEAD) {
bio               136 drivers/md/bcache/io.c void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
bio               139 drivers/md/bcache/io.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio               141 drivers/md/bcache/io.c 	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
bio               143 drivers/md/bcache/io.c 	unsigned int threshold = op_is_write(bio_op(bio))
bio               166 drivers/md/bcache/io.c void bch_bbio_endio(struct cache_set *c, struct bio *bio,
bio               169 drivers/md/bcache/io.c 	struct closure *cl = bio->bi_private;
bio               171 drivers/md/bcache/io.c 	bch_bbio_count_io_errors(c, bio, error, m);
bio               172 drivers/md/bcache/io.c 	bio_put(bio);
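The io.c entries above recover bcache's per-I/O state with container_of(): struct bbio embeds the struct bio as its last member (the inline bvec array allocated behind it belongs to the bio), so any bio pointer coming back from the block layer maps to its bbio without a lookup. A simplified sketch; the real struct in bcache.h carries extra padding, and to_bbio() is a hypothetical helper where the source open-codes container_of() at each site:

    #include <linux/bio.h>
    #include <linux/kernel.h>

    struct bbio {
            unsigned int    submit_time_us;
            struct bkey     key;
            struct bio      bio;    /* must be last: bi_inline_vecs live behind it */
    };

    static inline struct bbio *to_bbio(struct bio *bio)
    {
            return container_of(bio, struct bbio, bio);
    }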
bio                28 drivers/md/bcache/journal.c static void journal_read_endio(struct bio *bio)
bio                30 drivers/md/bcache/journal.c 	struct closure *cl = bio->bi_private;
bio                39 drivers/md/bcache/journal.c 	struct bio *bio = &ja->bio;
bio                56 drivers/md/bcache/journal.c 		bio_reset(bio);
bio                57 drivers/md/bcache/journal.c 		bio->bi_iter.bi_sector	= bucket + offset;
bio                58 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bio                59 drivers/md/bcache/journal.c 		bio->bi_iter.bi_size	= len << 9;
bio                61 drivers/md/bcache/journal.c 		bio->bi_end_io	= journal_read_endio;
bio                62 drivers/md/bcache/journal.c 		bio->bi_private = &cl;
bio                63 drivers/md/bcache/journal.c 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio                64 drivers/md/bcache/journal.c 		bch_bio_map(bio, data);
bio                66 drivers/md/bcache/journal.c 		closure_bio_submit(ca->set, bio, &cl);
bio               578 drivers/md/bcache/journal.c static void journal_discard_endio(struct bio *bio)
bio               581 drivers/md/bcache/journal.c 		container_of(bio, struct journal_device, discard_bio);
bio               601 drivers/md/bcache/journal.c 	struct bio *bio = &ja->discard_bio;
bio               625 drivers/md/bcache/journal.c 		bio_init(bio, bio->bi_inline_vecs, 1);
bio               626 drivers/md/bcache/journal.c 		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio               627 drivers/md/bcache/journal.c 		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
bio               629 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bio               630 drivers/md/bcache/journal.c 		bio->bi_iter.bi_size	= bucket_bytes(ca);
bio               631 drivers/md/bcache/journal.c 		bio->bi_end_io		= journal_discard_endio;
bio               725 drivers/md/bcache/journal.c static void journal_write_endio(struct bio *bio)
bio               727 drivers/md/bcache/journal.c 	struct journal_write *w = bio->bi_private;
bio               729 drivers/md/bcache/journal.c 	cache_set_err_on(bio->bi_status, w->c, "journal io error");
bio               765 drivers/md/bcache/journal.c 	struct bio *bio;
bio               799 drivers/md/bcache/journal.c 		bio = &ca->journal.bio;
bio               803 drivers/md/bcache/journal.c 		bio_reset(bio);
bio               804 drivers/md/bcache/journal.c 		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
bio               805 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bio               806 drivers/md/bcache/journal.c 		bio->bi_iter.bi_size = sectors << 9;
bio               808 drivers/md/bcache/journal.c 		bio->bi_end_io	= journal_write_endio;
bio               809 drivers/md/bcache/journal.c 		bio->bi_private = w;
bio               810 drivers/md/bcache/journal.c 		bio_set_op_attrs(bio, REQ_OP_WRITE,
bio               812 drivers/md/bcache/journal.c 		bch_bio_map(bio, w->data);
bio               814 drivers/md/bcache/journal.c 		trace_bcache_journal_write(bio, w->data->keys);
bio               815 drivers/md/bcache/journal.c 		bio_list_add(&list, bio);
bio               831 drivers/md/bcache/journal.c 	while ((bio = bio_list_pop(&list)))
bio               832 drivers/md/bcache/journal.c 		closure_bio_submit(c, bio, cl);
bio               151 drivers/md/bcache/journal.h 	struct bio		discard_bio;
bio               155 drivers/md/bcache/journal.h 	struct bio		bio;
bio                19 drivers/md/bcache/movinggc.c 	struct bbio		bio;
bio                48 drivers/md/bcache/movinggc.c 	struct bio *bio = &io->bio.bio;
bio                50 drivers/md/bcache/movinggc.c 	bio_free_pages(bio);
bio                62 drivers/md/bcache/movinggc.c static void read_moving_endio(struct bio *bio)
bio                64 drivers/md/bcache/movinggc.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio                65 drivers/md/bcache/movinggc.c 	struct moving_io *io = container_of(bio->bi_private,
bio                68 drivers/md/bcache/movinggc.c 	if (bio->bi_status)
bio                69 drivers/md/bcache/movinggc.c 		io->op.status = bio->bi_status;
bio                75 drivers/md/bcache/movinggc.c 	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
bio                80 drivers/md/bcache/movinggc.c 	struct bio *bio = &io->bio.bio;
bio                82 drivers/md/bcache/movinggc.c 	bio_init(bio, bio->bi_inline_vecs,
bio                84 drivers/md/bcache/movinggc.c 	bio_get(bio);
bio                85 drivers/md/bcache/movinggc.c 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio                87 drivers/md/bcache/movinggc.c 	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
bio                88 drivers/md/bcache/movinggc.c 	bio->bi_private		= &io->cl;
bio                89 drivers/md/bcache/movinggc.c 	bch_bio_map(bio, NULL);
bio               100 drivers/md/bcache/movinggc.c 		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
bio               102 drivers/md/bcache/movinggc.c 		op->bio			= &io->bio.bio;
bio               119 drivers/md/bcache/movinggc.c 	struct bio *bio = &io->bio.bio;
bio               121 drivers/md/bcache/movinggc.c 	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
bio               130 drivers/md/bcache/movinggc.c 	struct bio *bio;
bio               161 drivers/md/bcache/movinggc.c 		bio = &io->bio.bio;
bio               163 drivers/md/bcache/movinggc.c 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio               164 drivers/md/bcache/movinggc.c 		bio->bi_end_io	= read_moving_endio;
bio               166 drivers/md/bcache/movinggc.c 		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
bio                40 drivers/md/bcache/request.c static void bio_csum(struct bio *bio, struct bkey *k)
bio                46 drivers/md/bcache/request.c 	bio_for_each_segment(bv, bio, iter) {
bio               123 drivers/md/bcache/request.c 	struct bio *bio = op->bio;
bio               126 drivers/md/bcache/request.c 		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
bio               128 drivers/md/bcache/request.c 	while (bio_sectors(bio)) {
bio               129 drivers/md/bcache/request.c 		unsigned int sectors = min(bio_sectors(bio),
bio               135 drivers/md/bcache/request.c 		bio->bi_iter.bi_sector	+= sectors;
bio               136 drivers/md/bcache/request.c 		bio->bi_iter.bi_size	-= sectors << 9;
bio               140 drivers/md/bcache/request.c 				     bio->bi_iter.bi_sector,
bio               146 drivers/md/bcache/request.c 	bio_put(bio);
bio               181 drivers/md/bcache/request.c static void bch_data_insert_endio(struct bio *bio)
bio               183 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
bio               186 drivers/md/bcache/request.c 	if (bio->bi_status) {
bio               189 drivers/md/bcache/request.c 			op->status = bio->bi_status;
bio               196 drivers/md/bcache/request.c 	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
bio               202 drivers/md/bcache/request.c 	struct bio *bio = op->bio, *n;
bio               207 drivers/md/bcache/request.c 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
bio               214 drivers/md/bcache/request.c 	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
bio               232 drivers/md/bcache/request.c 		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
bio               234 drivers/md/bcache/request.c 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
bio               239 drivers/md/bcache/request.c 		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
bio               261 drivers/md/bcache/request.c 	} while (n != bio);
bio               291 drivers/md/bcache/request.c 		bio_put(bio);
bio               324 drivers/md/bcache/request.c 	trace_bcache_write(op->c, op->inode, op->bio,
bio               328 drivers/md/bcache/request.c 	bio_get(op->bio);
bio               375 drivers/md/bcache/request.c static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
bio               385 drivers/md/bcache/request.c 	    (bio_op(bio) == REQ_OP_DISCARD))
bio               390 drivers/md/bcache/request.c 	     op_is_write(bio_op(bio))))
bio               403 drivers/md/bcache/request.c 	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
bio               404 drivers/md/bcache/request.c 		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
bio               409 drivers/md/bcache/request.c 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio               410 drivers/md/bcache/request.c 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
bio               428 drivers/md/bcache/request.c 	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
bio               429 drivers/md/bcache/request.c 		if (i->last == bio->bi_iter.bi_sector &&
bio               438 drivers/md/bcache/request.c 	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
bio               439 drivers/md/bcache/request.c 		i->sequential	+= bio->bi_iter.bi_size;
bio               441 drivers/md/bcache/request.c 	i->last			 = bio_end_sector(bio);
bio               456 drivers/md/bcache/request.c 		trace_bcache_bypass_sequential(bio);
bio               461 drivers/md/bcache/request.c 		trace_bcache_bypass_congested(bio);
bio               466 drivers/md/bcache/request.c 	bch_rescale_priorities(c, bio_sectors(bio));
bio               469 drivers/md/bcache/request.c 	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
bio               479 drivers/md/bcache/request.c 	struct bbio		bio;
bio               480 drivers/md/bcache/request.c 	struct bio		*orig_bio;
bio               481 drivers/md/bcache/request.c 	struct bio		*cache_miss;
bio               496 drivers/md/bcache/request.c static void bch_cache_read_endio(struct bio *bio)
bio               498 drivers/md/bcache/request.c 	struct bbio *b = container_of(bio, struct bbio, bio);
bio               499 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
bio               509 drivers/md/bcache/request.c 	if (bio->bi_status)
bio               510 drivers/md/bcache/request.c 		s->iop.status = bio->bi_status;
bio               517 drivers/md/bcache/request.c 	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
bio               527 drivers/md/bcache/request.c 	struct bio *n, *bio = &s->bio.bio;
bio               531 drivers/md/bcache/request.c 	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
bio               535 drivers/md/bcache/request.c 	    KEY_START(k) > bio->bi_iter.bi_sector) {
bio               536 drivers/md/bcache/request.c 		unsigned int bio_sectors = bio_sectors(bio);
bio               539 drivers/md/bcache/request.c 				KEY_START(k) - bio->bi_iter.bi_sector)
bio               541 drivers/md/bcache/request.c 		int ret = s->d->cache_miss(b, s, bio, sectors);
bio               561 drivers/md/bcache/request.c 	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
bio               562 drivers/md/bcache/request.c 				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
bio               565 drivers/md/bcache/request.c 	bio_key = &container_of(n, struct bbio, bio)->key;
bio               586 drivers/md/bcache/request.c 	return n == bio ? MAP_DONE : MAP_CONTINUE;
bio               592 drivers/md/bcache/request.c 	struct bio *bio = &s->bio.bio;
bio               599 drivers/md/bcache/request.c 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
bio               632 drivers/md/bcache/request.c static void request_endio(struct bio *bio)
bio               634 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
bio               636 drivers/md/bcache/request.c 	if (bio->bi_status) {
bio               639 drivers/md/bcache/request.c 		s->iop.status = bio->bi_status;
bio               644 drivers/md/bcache/request.c 	bio_put(bio);
bio               648 drivers/md/bcache/request.c static void backing_request_endio(struct bio *bio)
bio               650 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
bio               652 drivers/md/bcache/request.c 	if (bio->bi_status) {
bio               664 drivers/md/bcache/request.c 			     bio->bi_opf & REQ_PREFLUSH)) {
bio               666 drivers/md/bcache/request.c 				dc->backing_dev_name, bio->bi_status);
bio               669 drivers/md/bcache/request.c 			s->iop.status = bio->bi_status;
bio               673 drivers/md/bcache/request.c 		bch_count_backing_io_errors(dc, bio);
bio               676 drivers/md/bcache/request.c 	bio_put(bio);
bio               694 drivers/md/bcache/request.c 			struct bio *orig_bio,
bio               697 drivers/md/bcache/request.c 	struct bio *bio = &s->bio.bio;
bio               699 drivers/md/bcache/request.c 	bio_init(bio, NULL, 0);
bio               700 drivers/md/bcache/request.c 	__bio_clone_fast(bio, orig_bio);
bio               707 drivers/md/bcache/request.c 	bio->bi_end_io		= end_io_fn;
bio               708 drivers/md/bcache/request.c 	bio->bi_private		= &s->cl;
bio               710 drivers/md/bcache/request.c 	bio_cnt_set(bio, 3);
bio               719 drivers/md/bcache/request.c 	if (s->iop.bio)
bio               720 drivers/md/bcache/request.c 		bio_put(s->iop.bio);
bio               727 drivers/md/bcache/request.c static inline struct search *search_alloc(struct bio *bio,
bio               735 drivers/md/bcache/request.c 	do_bio_hook(s, bio, request_endio);
bio               738 drivers/md/bcache/request.c 	s->orig_bio		= bio;
bio               743 drivers/md/bcache/request.c 	s->write		= op_is_write(bio_op(bio));
bio               748 drivers/md/bcache/request.c 	s->iop.bio		= NULL;
bio               754 drivers/md/bcache/request.c 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
bio               780 drivers/md/bcache/request.c 	if (s->iop.bio)
bio               781 drivers/md/bcache/request.c 		bio_free_pages(s->iop.bio);
bio               789 drivers/md/bcache/request.c 	struct bio *bio = &s->bio.bio;
bio               808 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
bio               822 drivers/md/bcache/request.c 	if (s->iop.bio)
bio               823 drivers/md/bcache/request.c 		bio_free_pages(s->iop.bio);
bio               842 drivers/md/bcache/request.c 	if (s->iop.bio) {
bio               843 drivers/md/bcache/request.c 		bio_reset(s->iop.bio);
bio               844 drivers/md/bcache/request.c 		s->iop.bio->bi_iter.bi_sector =
bio               846 drivers/md/bcache/request.c 		bio_copy_dev(s->iop.bio, s->cache_miss);
bio               847 drivers/md/bcache/request.c 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bio               848 drivers/md/bcache/request.c 		bch_bio_map(s->iop.bio, NULL);
bio               850 drivers/md/bcache/request.c 		bio_copy_data(s->cache_miss, s->iop.bio);
bio               862 drivers/md/bcache/request.c 	if (s->iop.bio &&
bio               882 drivers/md/bcache/request.c 	else if (s->iop.bio || verify(dc))
bio               889 drivers/md/bcache/request.c 				 struct bio *bio, unsigned int sectors)
bio               894 drivers/md/bcache/request.c 	struct bio *miss, *cache_bio;
bio               899 drivers/md/bcache/request.c 		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
bio               900 drivers/md/bcache/request.c 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
bio               904 drivers/md/bcache/request.c 	if (!(bio->bi_opf & REQ_RAHEAD) &&
bio               905 drivers/md/bcache/request.c 	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
bio               908 drivers/md/bcache/request.c 			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
bio               910 drivers/md/bcache/request.c 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
bio               913 drivers/md/bcache/request.c 				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
bio               922 drivers/md/bcache/request.c 	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
bio               925 drivers/md/bcache/request.c 	ret = miss == bio ? MAP_DONE : -EINTR;
bio               948 drivers/md/bcache/request.c 	s->iop.bio	= cache_bio;
bio               986 drivers/md/bcache/request.c 	struct bio *bio = &s->bio.bio;
bio               987 drivers/md/bcache/request.c 	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
bio               988 drivers/md/bcache/request.c 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bio              1009 drivers/md/bcache/request.c 	if (bio_op(bio) == REQ_OP_DISCARD)
bio              1020 drivers/md/bcache/request.c 		s->iop.bio = s->orig_bio;
bio              1021 drivers/md/bcache/request.c 		bio_get(s->iop.bio);
bio              1023 drivers/md/bcache/request.c 		if (bio_op(bio) == REQ_OP_DISCARD &&
bio              1028 drivers/md/bcache/request.c 		bio->bi_end_io = backing_request_endio;
bio              1029 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
bio              1033 drivers/md/bcache/request.c 		s->iop.bio = bio;
bio              1035 drivers/md/bcache/request.c 		if (bio->bi_opf & REQ_PREFLUSH) {
bio              1040 drivers/md/bcache/request.c 			struct bio *flush;
bio              1048 drivers/md/bcache/request.c 			bio_copy_dev(flush, bio);
bio              1056 drivers/md/bcache/request.c 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
bio              1058 drivers/md/bcache/request.c 		bio->bi_end_io = backing_request_endio;
bio              1059 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
bio              1070 drivers/md/bcache/request.c 	struct bio *bio = &s->bio.bio;
bio              1076 drivers/md/bcache/request.c 	bio->bi_end_io = backing_request_endio;
bio              1077 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, bio, cl);
bio              1089 drivers/md/bcache/request.c static void detached_dev_end_io(struct bio *bio)
bio              1093 drivers/md/bcache/request.c 	ddip = bio->bi_private;
bio              1094 drivers/md/bcache/request.c 	bio->bi_end_io = ddip->bi_end_io;
bio              1095 drivers/md/bcache/request.c 	bio->bi_private = ddip->bi_private;
bio              1097 drivers/md/bcache/request.c 	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
bio              1100 drivers/md/bcache/request.c 	if (bio->bi_status) {
bio              1104 drivers/md/bcache/request.c 		bch_count_backing_io_errors(dc, bio);
bio              1108 drivers/md/bcache/request.c 	bio->bi_end_io(bio);
bio              1111 drivers/md/bcache/request.c static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
bio              1124 drivers/md/bcache/request.c 	ddip->bi_end_io = bio->bi_end_io;
bio              1125 drivers/md/bcache/request.c 	ddip->bi_private = bio->bi_private;
bio              1126 drivers/md/bcache/request.c 	bio->bi_end_io = detached_dev_end_io;
bio              1127 drivers/md/bcache/request.c 	bio->bi_private = ddip;
bio              1129 drivers/md/bcache/request.c 	if ((bio_op(bio) == REQ_OP_DISCARD) &&
bio              1131 drivers/md/bcache/request.c 		bio->bi_end_io(bio);
bio              1133 drivers/md/bcache/request.c 		generic_make_request(bio);
bio              1177 drivers/md/bcache/request.c 					struct bio *bio)
bio              1180 drivers/md/bcache/request.c 	struct bcache_device *d = bio->bi_disk->private_data;
bio              1182 drivers/md/bcache/request.c 	int rw = bio_data_dir(bio);
bio              1186 drivers/md/bcache/request.c 		bio->bi_status = BLK_STS_IOERR;
bio              1187 drivers/md/bcache/request.c 		bio_endio(bio);
bio              1207 drivers/md/bcache/request.c 			      bio_op(bio),
bio              1208 drivers/md/bcache/request.c 			      bio_sectors(bio),
bio              1211 drivers/md/bcache/request.c 	bio_set_dev(bio, dc->bdev);
bio              1212 drivers/md/bcache/request.c 	bio->bi_iter.bi_sector += dc->sb.data_offset;
bio              1215 drivers/md/bcache/request.c 		s = search_alloc(bio, d);
bio              1216 drivers/md/bcache/request.c 		trace_bcache_request_start(s->d, bio);
bio              1218 drivers/md/bcache/request.c 		if (!bio->bi_iter.bi_size) {
bio              1227 drivers/md/bcache/request.c 			s->iop.bypass = check_should_bypass(dc, bio);
bio              1236 drivers/md/bcache/request.c 		detached_dev_do_request(d, bio);
bio              1290 drivers/md/bcache/request.c 				struct bio *bio, unsigned int sectors)
bio              1292 drivers/md/bcache/request.c 	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
bio              1294 drivers/md/bcache/request.c 	swap(bio->bi_iter.bi_size, bytes);
bio              1295 drivers/md/bcache/request.c 	zero_fill_bio(bio);
bio              1296 drivers/md/bcache/request.c 	swap(bio->bi_iter.bi_size, bytes);
bio              1298 drivers/md/bcache/request.c 	bio_advance(bio, bytes);
bio              1300 drivers/md/bcache/request.c 	if (!bio->bi_iter.bi_size)
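
Note: the request.c cache-miss entries just above (swap bi_size, zero_fill_bio(), swap back, bio_advance()) are bcache's idiom for zeroing only the first part of a bio's payload. A standalone restatement of that idiom as a sketch; my_zero_prefix() is a hypothetical name, the helpers are the stock ones quoted in this index.

#include <linux/bio.h>
#include <linux/kernel.h>

/* Zero the first @bytes of @bio's remaining payload, then skip them. */
static void my_zero_prefix(struct bio *bio, unsigned int bytes)
{
	bytes = min(bytes, bio->bi_iter.bi_size);

	swap(bio->bi_iter.bi_size, bytes);	/* pretend the bio ends early */
	zero_fill_bio(bio);			/* zeroes only the prefix */
	swap(bio->bi_iter.bi_size, bytes);	/* restore the real size */

	bio_advance(bio, bytes);		/* iterator now past the zeroes */
}
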
bio              1317 drivers/md/bcache/request.c 					     struct bio *bio)
bio              1321 drivers/md/bcache/request.c 	struct bcache_device *d = bio->bi_disk->private_data;
bio              1324 drivers/md/bcache/request.c 		bio->bi_status = BLK_STS_IOERR;
bio              1325 drivers/md/bcache/request.c 		bio_endio(bio);
bio              1329 drivers/md/bcache/request.c 	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
bio              1331 drivers/md/bcache/request.c 	s = search_alloc(bio, d);
bio              1333 drivers/md/bcache/request.c 	bio = &s->bio.bio;
bio              1335 drivers/md/bcache/request.c 	trace_bcache_request_start(s->d, bio);
bio              1337 drivers/md/bcache/request.c 	if (!bio->bi_iter.bi_size) {
bio              1346 drivers/md/bcache/request.c 	} else if (bio_data_dir(bio)) {
bio              1348 drivers/md/bcache/request.c 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
bio              1349 drivers/md/bcache/request.c 					&KEY(d->id, bio_end_sector(bio), 0));
bio              1351 drivers/md/bcache/request.c 		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
bio              1353 drivers/md/bcache/request.c 		s->iop.bio		= bio;
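
Note: search_alloc()/do_bio_hook() above embed a clone of the incoming bio inside bcache's per-request state so its completion can be intercepted. A minimal sketch of that hook pattern, assuming the ~v5.4 block API these entries quote; struct my_request, my_endio() and my_submit() are hypothetical names.

#include <linux/bio.h>

struct my_request {
	struct bio	clone;		/* embedded clone, like s->bio.bio */
	struct bio	*orig_bio;	/* completed once the clone is done */
};

static void my_endio(struct bio *bio)
{
	struct my_request *rq = bio->bi_private;

	rq->orig_bio->bi_status = bio->bi_status;	/* propagate error */
	bio_endio(rq->orig_bio);
}

static void my_submit(struct my_request *rq, struct bio *orig)
{
	struct bio *bio = &rq->clone;

	bio_init(bio, NULL, 0);			/* no bvecs of our own */
	__bio_clone_fast(bio, orig);		/* share orig's pages/iter */
	bio->bi_end_io	= my_endio;
	bio->bi_private	= rq;
	rq->orig_bio	= orig;

	generic_make_request(bio);
}
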
bio                 8 drivers/md/bcache/request.h 	struct bio		*bio;
bio               199 drivers/md/bcache/super.c static void write_bdev_super_endio(struct bio *bio)
bio               201 drivers/md/bcache/super.c 	struct cached_dev *dc = bio->bi_private;
bio               203 drivers/md/bcache/super.c 	if (bio->bi_status)
bio               204 drivers/md/bcache/super.c 		bch_count_backing_io_errors(dc, bio);
bio               209 drivers/md/bcache/super.c static void __write_super(struct cache_sb *sb, struct bio *bio)
bio               211 drivers/md/bcache/super.c 	struct cache_sb *out = page_address(bio_first_page_all(bio));
bio               214 drivers/md/bcache/super.c 	bio->bi_iter.bi_sector	= SB_SECTOR;
bio               215 drivers/md/bcache/super.c 	bio->bi_iter.bi_size	= SB_SIZE;
bio               216 drivers/md/bcache/super.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
bio               217 drivers/md/bcache/super.c 	bch_bio_map(bio, NULL);
bio               241 drivers/md/bcache/super.c 	submit_bio(bio);
bio               254 drivers/md/bcache/super.c 	struct bio *bio = &dc->sb_bio;
bio               259 drivers/md/bcache/super.c 	bio_reset(bio);
bio               260 drivers/md/bcache/super.c 	bio_set_dev(bio, dc->bdev);
bio               261 drivers/md/bcache/super.c 	bio->bi_end_io	= write_bdev_super_endio;
bio               262 drivers/md/bcache/super.c 	bio->bi_private = dc;
bio               266 drivers/md/bcache/super.c 	__write_super(&dc->sb, bio);
bio               271 drivers/md/bcache/super.c static void write_super_endio(struct bio *bio)
bio               273 drivers/md/bcache/super.c 	struct cache *ca = bio->bi_private;
bio               276 drivers/md/bcache/super.c 	bch_count_io_errors(ca, bio->bi_status, 0,
bio               300 drivers/md/bcache/super.c 		struct bio *bio = &ca->sb_bio;
bio               308 drivers/md/bcache/super.c 		bio_reset(bio);
bio               309 drivers/md/bcache/super.c 		bio_set_dev(bio, ca->bdev);
bio               310 drivers/md/bcache/super.c 		bio->bi_end_io	= write_super_endio;
bio               311 drivers/md/bcache/super.c 		bio->bi_private = ca;
bio               314 drivers/md/bcache/super.c 		__write_super(&ca->sb, bio);
bio               322 drivers/md/bcache/super.c static void uuid_endio(struct bio *bio)
bio               324 drivers/md/bcache/super.c 	struct closure *cl = bio->bi_private;
bio               327 drivers/md/bcache/super.c 	cache_set_err_on(bio->bi_status, c, "accessing uuids");
bio               328 drivers/md/bcache/super.c 	bch_bbio_free(bio, c);
bio               352 drivers/md/bcache/super.c 		struct bio *bio = bch_bbio_alloc(c);
bio               354 drivers/md/bcache/super.c 		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
bio               355 drivers/md/bcache/super.c 		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio               357 drivers/md/bcache/super.c 		bio->bi_end_io	= uuid_endio;
bio               358 drivers/md/bcache/super.c 		bio->bi_private = cl;
bio               359 drivers/md/bcache/super.c 		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bio               360 drivers/md/bcache/super.c 		bch_bio_map(bio, c->uuids);
bio               362 drivers/md/bcache/super.c 		bch_submit_bbio(bio, c, k, i);
bio               502 drivers/md/bcache/super.c static void prio_endio(struct bio *bio)
bio               504 drivers/md/bcache/super.c 	struct cache *ca = bio->bi_private;
bio               506 drivers/md/bcache/super.c 	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
bio               507 drivers/md/bcache/super.c 	bch_bbio_free(bio, ca->set);
bio               515 drivers/md/bcache/super.c 	struct bio *bio = bch_bbio_alloc(ca->set);
bio               519 drivers/md/bcache/super.c 	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
bio               520 drivers/md/bcache/super.c 	bio_set_dev(bio, ca->bdev);
bio               521 drivers/md/bcache/super.c 	bio->bi_iter.bi_size	= bucket_bytes(ca);
bio               523 drivers/md/bcache/super.c 	bio->bi_end_io	= prio_endio;
bio               524 drivers/md/bcache/super.c 	bio->bi_private = ca;
bio               525 drivers/md/bcache/super.c 	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bio               526 drivers/md/bcache/super.c 	bch_bio_map(bio, ca->disk_buckets);
bio               528 drivers/md/bcache/super.c 	closure_bio_submit(ca->set, bio, &ca->prio);
bio               845 drivers/md/bcache/super.c 	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
bio              1824 drivers/md/bcache/super.c 	    bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
bio              2160 drivers/md/bcache/super.c 	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
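
Note: __write_super()/write_bdev_super() above reuse a long-lived embedded bio for every superblock flush: bio_reset(), re-aim it at the superblock sector, submit with a private endio. A condensed sketch under those assumptions; MY_SB_SECTOR and the my_* names are illustrative stand-ins.

#include <linux/bio.h>

#define MY_SB_SECTOR	8	/* hypothetical superblock offset */

static void my_sb_endio(struct bio *bio)
{
	if (bio->bi_status)
		pr_err("superblock write failed\n");
	/* real code counts the error and releases waiters here */
}

static void my_write_super(struct bio *bio, struct block_device *bdev,
			   struct page *sb_page)
{
	bio_reset(bio);				/* safe: bio is embedded and reused */
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector	= MY_SB_SECTOR;
	bio->bi_end_io		= my_sb_endio;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);

	if (!bio_add_page(bio, sb_page, PAGE_SIZE, 0))
		return;				/* cannot fail with a free bvec slot */

	submit_bio(bio);
}
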
bio               231 drivers/md/bcache/util.c void bch_bio_map(struct bio *bio, void *base)
bio               233 drivers/md/bcache/util.c 	size_t size = bio->bi_iter.bi_size;
bio               234 drivers/md/bcache/util.c 	struct bio_vec *bv = bio->bi_io_vec;
bio               236 drivers/md/bcache/util.c 	BUG_ON(!bio->bi_iter.bi_size);
bio               237 drivers/md/bcache/util.c 	BUG_ON(bio->bi_vcnt);
bio               242 drivers/md/bcache/util.c 	for (; size; bio->bi_vcnt++, bv++) {
bio               268 drivers/md/bcache/util.c int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
bio               277 drivers/md/bcache/util.c 	for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {
bio               280 drivers/md/bcache/util.c 			while (--bv >= bio->bi_io_vec)
bio               586 drivers/md/bcache/util.h void bch_bio_map(struct bio *bio, void *base);
bio               587 drivers/md/bcache/util.h int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
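
Note: bch_bio_map() above lays a contiguous kernel buffer into a bio's bvec array by hand. Roughly the same effect using the generic bio_add_page() helper; a sketch assuming `base` may be kmalloc or vmalloc memory, as bcache's version allows, and that the bio has enough bvec slots.

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map @size bytes at @base into @bio; -ENOMEM if bvec slots run out. */
static int my_bio_map(struct bio *bio, void *base, size_t size)
{
	while (size) {
		unsigned int off = offset_in_page(base);
		unsigned int len = min_t(size_t, size, PAGE_SIZE - off);
		struct page *p = is_vmalloc_addr(base) ?
			vmalloc_to_page(base) : virt_to_page(base);

		if (!bio_add_page(bio, p, len, off))
			return -ENOMEM;

		base += len;
		size -= len;
	}
	return 0;
}
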
bio               245 drivers/md/bcache/writeback.c 	struct bio		bio;
bio               251 drivers/md/bcache/writeback.c 	struct bio *bio = &io->bio;
bio               253 drivers/md/bcache/writeback.c 	bio_init(bio, bio->bi_inline_vecs,
bio               256 drivers/md/bcache/writeback.c 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio               258 drivers/md/bcache/writeback.c 	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
bio               259 drivers/md/bcache/writeback.c 	bio->bi_private		= w;
bio               260 drivers/md/bcache/writeback.c 	bch_bio_map(bio, NULL);
bio               273 drivers/md/bcache/writeback.c 	struct keybuf_key *w = io->bio.bi_private;
bio               276 drivers/md/bcache/writeback.c 	bio_free_pages(&io->bio);
bio               309 drivers/md/bcache/writeback.c static void dirty_endio(struct bio *bio)
bio               311 drivers/md/bcache/writeback.c 	struct keybuf_key *w = bio->bi_private;
bio               314 drivers/md/bcache/writeback.c 	if (bio->bi_status) {
bio               316 drivers/md/bcache/writeback.c 		bch_count_backing_io_errors(io->dc, bio);
bio               325 drivers/md/bcache/writeback.c 	struct keybuf_key *w = io->bio.bi_private;
bio               356 drivers/md/bcache/writeback.c 		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
bio               357 drivers/md/bcache/writeback.c 		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
bio               358 drivers/md/bcache/writeback.c 		bio_set_dev(&io->bio, io->dc->bdev);
bio               359 drivers/md/bcache/writeback.c 		io->bio.bi_end_io	= dirty_endio;
bio               362 drivers/md/bcache/writeback.c 		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
bio               371 drivers/md/bcache/writeback.c static void read_dirty_endio(struct bio *bio)
bio               373 drivers/md/bcache/writeback.c 	struct keybuf_key *w = bio->bi_private;
bio               378 drivers/md/bcache/writeback.c 			    bio->bi_status, 1,
bio               381 drivers/md/bcache/writeback.c 	dirty_endio(bio);
bio               388 drivers/md/bcache/writeback.c 	closure_bio_submit(io->dc->disk.c, &io->bio, cl);
bio               471 drivers/md/bcache/writeback.c 			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
bio               472 drivers/md/bcache/writeback.c 			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
bio               473 drivers/md/bcache/writeback.c 			bio_set_dev(&io->bio,
bio               475 drivers/md/bcache/writeback.c 			io->bio.bi_end_io	= read_dirty_endio;
bio               477 drivers/md/bcache/writeback.c 			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
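
Note: dirty_init() and read_dirty() above prepare a writeback IO whose bio lives inside the dirty_io struct, with pages attached afterwards by bch_bio_alloc_pages(). A reduced, self-contained version of that setup; MY_WB_PAGES and the my_* names are hypothetical.

#include <linux/bio.h>
#include <linux/gfp.h>

#define MY_WB_PAGES	16	/* hypothetical per-IO page budget */

struct my_dirty_io {
	struct bio	bio;			/* reused for read, then write */
	struct bio_vec	bvecs[MY_WB_PAGES];	/* backing store for the bio */
};

static int my_dirty_init(struct my_dirty_io *io, unsigned int sectors)
{
	struct bio *bio = &io->bio;
	unsigned int bytes = sectors << 9;

	bio_init(bio, io->bvecs, MY_WB_PAGES);

	while (bytes) {
		unsigned int len = min_t(unsigned int, bytes, PAGE_SIZE);
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page || !bio_add_page(bio, page, len, 0)) {
			if (page)
				__free_page(page);
			bio_free_pages(bio);	/* undo earlier allocations */
			return -ENOMEM;
		}
		bytes -= len;
	}
	return 0;
}
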
bio                64 drivers/md/bcache/writeback.h static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
bio                74 drivers/md/bcache/writeback.h 	if (bio_op(bio) == REQ_OP_DISCARD)
bio                78 drivers/md/bcache/writeback.h 	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio                79 drivers/md/bcache/writeback.h 				    bio_sectors(bio)))
bio                85 drivers/md/bcache/writeback.h 	return (op_is_sync(bio->bi_opf) ||
bio                86 drivers/md/bcache/writeback.h 		bio->bi_opf & (REQ_META|REQ_PRIO) ||
bio                77 drivers/md/dm-bio-prison-v1.c 			     struct bio *holder,
bio               111 drivers/md/dm-bio-prison-v1.c 			struct bio *inmate,
bio               148 drivers/md/dm-bio-prison-v1.c 		      struct bio *inmate,
bio               164 drivers/md/dm-bio-prison-v1.c 		  struct bio *inmate,
bio               236 drivers/md/dm-bio-prison-v1.c 	struct bio *bio;
bio               241 drivers/md/dm-bio-prison-v1.c 	while ((bio = bio_list_pop(&bios))) {
bio               242 drivers/md/dm-bio-prison-v1.c 		bio->bi_status = error;
bio               243 drivers/md/dm-bio-prison-v1.c 		bio_endio(bio);
bio                45 drivers/md/dm-bio-prison-v1.h 	struct bio *holder;
bio                83 drivers/md/dm-bio-prison-v1.h 		  struct bio *inmate,
bio               152 drivers/md/dm-bio-prison-v2.c 		  struct bio *inmate,
bio               175 drivers/md/dm-bio-prison-v2.c 		    struct bio *inmate,
bio                90 drivers/md/dm-bio-prison-v2.h 		    struct bio *inmate,
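
Note: the dm-bio-prison entries above implement range locks keyed on block ranges: the first bio to detain a key becomes the cell holder, later bios queue inside the cell as inmates and are released together. A hedged usage sketch against the v1 API named here, assuming dm_bio_detain() returns 0 when the caller becomes the holder and nonzero when the bio was queued behind an existing cell.

#include "dm-bio-prison-v1.h"

/* Returns true if we now hold the lock for @key; false if @bio waits. */
static bool my_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_out)
{
	struct dm_bio_prison_cell *prealloc;
	int r;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
	r = dm_bio_detain(prison, key, bio, prealloc, cell_out);
	if (r)
		dm_bio_prison_free_cell(prison, prealloc);	/* prealloc unused */

	return r == 0;
}
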
bio                32 drivers/md/dm-bio-record.h static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
bio                34 drivers/md/dm-bio-record.h 	bd->bi_disk = bio->bi_disk;
bio                35 drivers/md/dm-bio-record.h 	bd->bi_partno = bio->bi_partno;
bio                36 drivers/md/dm-bio-record.h 	bd->bi_flags = bio->bi_flags;
bio                37 drivers/md/dm-bio-record.h 	bd->bi_iter = bio->bi_iter;
bio                38 drivers/md/dm-bio-record.h 	bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
bio                39 drivers/md/dm-bio-record.h 	bd->bi_end_io = bio->bi_end_io;
bio                41 drivers/md/dm-bio-record.h 	bd->bi_integrity = bio_integrity(bio);
bio                45 drivers/md/dm-bio-record.h static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
bio                47 drivers/md/dm-bio-record.h 	bio->bi_disk = bd->bi_disk;
bio                48 drivers/md/dm-bio-record.h 	bio->bi_partno = bd->bi_partno;
bio                49 drivers/md/dm-bio-record.h 	bio->bi_flags = bd->bi_flags;
bio                50 drivers/md/dm-bio-record.h 	bio->bi_iter = bd->bi_iter;
bio                51 drivers/md/dm-bio-record.h 	atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
bio                52 drivers/md/dm-bio-record.h 	bio->bi_end_io = bd->bi_end_io;
bio                54 drivers/md/dm-bio-record.h 	bio->bi_integrity = bd->bi_integrity;
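
Note: dm_bio_record()/dm_bio_restore() above snapshot exactly the fields a target clobbers when remapping (device, iterator, flags, end_io), so a failed bio can be rewound and retried elsewhere. A sketch of that round trip in a hypothetical two-path target; all my_* names are illustrative, and a real target would defer the resubmission to a worker rather than issue from end_io context.

#include <linux/device-mapper.h>
#include "dm-bio-record.h"

struct my_target {
	struct dm_dev *dev;		/* primary path */
	struct dm_dev *fallback;	/* hypothetical retry path */
};

struct my_per_bio {
	struct dm_bio_details details;
	bool retried;
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_target *t = ti->private;
	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->retried = false;
	dm_bio_record(&pb->details, bio);	/* snapshot before remapping */
	bio_set_dev(bio, t->dev->bdev);
	return DM_MAPIO_REMAPPED;
}

static int my_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct my_target *t = ti->private;
	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	if (*error && !pb->retried) {
		pb->retried = true;
		dm_bio_restore(&pb->details, bio);	/* rewind iter/dev/end_io */
		bio_set_dev(bio, t->fallback->bdev);
		generic_make_request(bio);	/* deferred to a worker in real code */
		return DM_ENDIO_INCOMPLETE;	/* don't complete the bio yet */
	}
	return DM_ENDIO_DONE;
}
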
bio               584 drivers/md/dm-bufio.c static void bio_complete(struct bio *bio)
bio               586 drivers/md/dm-bufio.c 	struct dm_buffer *b = bio->bi_private;
bio               587 drivers/md/dm-bufio.c 	blk_status_t status = bio->bi_status;
bio               588 drivers/md/dm-bufio.c 	bio_put(bio);
bio               595 drivers/md/dm-bufio.c 	struct bio *bio;
bio               603 drivers/md/dm-bufio.c 	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
bio               604 drivers/md/dm-bufio.c 	if (!bio) {
bio               610 drivers/md/dm-bufio.c 	bio->bi_iter.bi_sector = sector;
bio               611 drivers/md/dm-bufio.c 	bio_set_dev(bio, b->c->bdev);
bio               612 drivers/md/dm-bufio.c 	bio_set_op_attrs(bio, rw, 0);
bio               613 drivers/md/dm-bufio.c 	bio->bi_end_io = bio_complete;
bio               614 drivers/md/dm-bufio.c 	bio->bi_private = b;
bio               621 drivers/md/dm-bufio.c 		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
bio               623 drivers/md/dm-bufio.c 			bio_put(bio);
bio               631 drivers/md/dm-bufio.c 	submit_bio(bio);
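
Note: the dm-bufio path above allocates a throwaway bio with bio_kmalloc(), attaches the buffer's pages, and lets bio_complete() drop it with bio_put(). The same shape as a standalone helper; my_io_done() stands in for the buffer-completion logic.

#include <linux/bio.h>

/* Hypothetical upper-layer completion hook. */
static void my_io_done(void *ctx, blk_status_t status)
{
	/* wake up whoever owns @ctx */
}

static void my_bio_complete(struct bio *bio)
{
	void *ctx = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_put(bio);			/* matches bio_kmalloc() below */
	my_io_done(ctx, status);
}

static int my_submit_buffer(struct block_device *bdev, sector_t sector,
			    struct page *page, unsigned int len, void *ctx)
{
	struct bio *bio = bio_kmalloc(GFP_NOIO, 1);	/* one bvec is enough */

	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector	= sector;
	bio_set_dev(bio, bdev);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	bio->bi_end_io	= my_bio_complete;
	bio->bi_private	= ctx;

	if (!bio_add_page(bio, page, len, 0)) {
		bio_put(bio);
		return -EINVAL;
	}

	submit_bio(bio);
	return 0;
}
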
bio               155 drivers/md/dm-cache-target.c 	void (*issue_op)(struct bio *bio, void *context);
bio               179 drivers/md/dm-cache-target.c 	struct bio *bio;
bio               205 drivers/md/dm-cache-target.c 	while ((bio = bio_list_pop(&bios))) {
bio               207 drivers/md/dm-cache-target.c 			bio->bi_status = r;
bio               208 drivers/md/dm-cache-target.c 			bio_endio(bio);
bio               210 drivers/md/dm-cache-target.c 			b->issue_op(bio, b->issue_context);
bio               217 drivers/md/dm-cache-target.c 			 void (*issue_op)(struct bio *bio, void *),
bio               256 drivers/md/dm-cache-target.c static void issue_after_commit(struct batcher *b, struct bio *bio)
bio               263 drivers/md/dm-cache-target.c        bio_list_add(&b->bios, bio);
bio               296 drivers/md/dm-cache-target.c static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
bio               299 drivers/md/dm-cache-target.c 	h->bi_end_io = bio->bi_end_io;
bio               301 drivers/md/dm-cache-target.c 	bio->bi_end_io = bi_end_io;
bio               302 drivers/md/dm-cache-target.c 	bio->bi_private = bi_private;
bio               305 drivers/md/dm-cache-target.c static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
bio               307 drivers/md/dm-cache-target.c 	bio->bi_end_io = h->bi_end_io;
bio               502 drivers/md/dm-cache-target.c 	struct bio *overwrite_bio;
bio               599 drivers/md/dm-cache-target.c static unsigned lock_level(struct bio *bio)
bio               601 drivers/md/dm-cache-target.c 	return bio_data_dir(bio) == WRITE ?
bio               610 drivers/md/dm-cache-target.c static struct per_bio_data *get_per_bio_data(struct bio *bio)
bio               612 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
bio               617 drivers/md/dm-cache-target.c static struct per_bio_data *init_per_bio_data(struct bio *bio)
bio               619 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio               622 drivers/md/dm-cache-target.c 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
bio               631 drivers/md/dm-cache-target.c static void defer_bio(struct cache *cache, struct bio *bio)
bio               636 drivers/md/dm-cache-target.c 	bio_list_add(&cache->deferred_bios, bio);
bio               656 drivers/md/dm-cache-target.c static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
bio               667 drivers/md/dm-cache-target.c 	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
bio               679 drivers/md/dm-cache-target.c 	pb = get_per_bio_data(bio);
bio               806 drivers/md/dm-cache-target.c static void remap_to_origin(struct cache *cache, struct bio *bio)
bio               808 drivers/md/dm-cache-target.c 	bio_set_dev(bio, cache->origin_dev->bdev);
bio               811 drivers/md/dm-cache-target.c static void remap_to_cache(struct cache *cache, struct bio *bio,
bio               814 drivers/md/dm-cache-target.c 	sector_t bi_sector = bio->bi_iter.bi_sector;
bio               817 drivers/md/dm-cache-target.c 	bio_set_dev(bio, cache->cache_dev->bdev);
bio               819 drivers/md/dm-cache-target.c 		bio->bi_iter.bi_sector =
bio               823 drivers/md/dm-cache-target.c 		bio->bi_iter.bi_sector =
bio               828 drivers/md/dm-cache-target.c static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
bio               834 drivers/md/dm-cache-target.c 	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio               835 drivers/md/dm-cache-target.c 	    bio_op(bio) != REQ_OP_DISCARD) {
bio               836 drivers/md/dm-cache-target.c 		pb = get_per_bio_data(bio);
bio               843 drivers/md/dm-cache-target.c static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
bio               847 drivers/md/dm-cache-target.c 		check_if_tick_bio_needed(cache, bio);
bio               848 drivers/md/dm-cache-target.c 	remap_to_origin(cache, bio);
bio               849 drivers/md/dm-cache-target.c 	if (bio_data_dir(bio) == WRITE)
bio               853 drivers/md/dm-cache-target.c static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
bio               857 drivers/md/dm-cache-target.c 	__remap_to_origin_clear_discard(cache, bio, oblock, true);
bio               860 drivers/md/dm-cache-target.c static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
bio               863 drivers/md/dm-cache-target.c 	check_if_tick_bio_needed(cache, bio);
bio               864 drivers/md/dm-cache-target.c 	remap_to_cache(cache, bio, cblock);
bio               865 drivers/md/dm-cache-target.c 	if (bio_data_dir(bio) == WRITE) {
bio               871 drivers/md/dm-cache-target.c static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
bio               873 drivers/md/dm-cache-target.c 	sector_t block_nr = bio->bi_iter.bi_sector;
bio               883 drivers/md/dm-cache-target.c static bool accountable_bio(struct cache *cache, struct bio *bio)
bio               885 drivers/md/dm-cache-target.c 	return bio_op(bio) != REQ_OP_DISCARD;
bio               888 drivers/md/dm-cache-target.c static void accounted_begin(struct cache *cache, struct bio *bio)
bio               892 drivers/md/dm-cache-target.c 	if (accountable_bio(cache, bio)) {
bio               893 drivers/md/dm-cache-target.c 		pb = get_per_bio_data(bio);
bio               894 drivers/md/dm-cache-target.c 		pb->len = bio_sectors(bio);
bio               899 drivers/md/dm-cache-target.c static void accounted_complete(struct cache *cache, struct bio *bio)
bio               901 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio               906 drivers/md/dm-cache-target.c static void accounted_request(struct cache *cache, struct bio *bio)
bio               908 drivers/md/dm-cache-target.c 	accounted_begin(cache, bio);
bio               909 drivers/md/dm-cache-target.c 	generic_make_request(bio);
bio               912 drivers/md/dm-cache-target.c static void issue_op(struct bio *bio, void *context)
bio               915 drivers/md/dm-cache-target.c 	accounted_request(cache, bio);
bio               922 drivers/md/dm-cache-target.c static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
bio               925 drivers/md/dm-cache-target.c 	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
bio               929 drivers/md/dm-cache-target.c 	bio_chain(origin_bio, bio);
bio               937 drivers/md/dm-cache-target.c 	remap_to_cache(cache, bio, cblock);
bio              1096 drivers/md/dm-cache-target.c static bool discard_or_flush(struct bio *bio)
bio              1098 drivers/md/dm-cache-target.c 	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
bio              1101 drivers/md/dm-cache-target.c static void calc_discard_block_range(struct cache *cache, struct bio *bio,
bio              1104 drivers/md/dm-cache-target.c 	sector_t sb = bio->bi_iter.bi_sector;
bio              1105 drivers/md/dm-cache-target.c 	sector_t se = bio_end_sector(bio);
bio              1151 drivers/md/dm-cache-target.c static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
bio              1153 drivers/md/dm-cache-target.c 	return (bio_data_dir(bio) == WRITE) &&
bio              1154 drivers/md/dm-cache-target.c 		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
bio              1157 drivers/md/dm-cache-target.c static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
bio              1160 drivers/md/dm-cache-target.c 		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
bio              1205 drivers/md/dm-cache-target.c static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
bio              1207 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio              1214 drivers/md/dm-cache-target.c static void overwrite_endio(struct bio *bio)
bio              1216 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = bio->bi_private;
bio              1218 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio              1220 drivers/md/dm-cache-target.c 	dm_unhook_bio(&pb->hook_info, bio);
bio              1222 drivers/md/dm-cache-target.c 	if (bio->bi_status)
bio              1223 drivers/md/dm-cache-target.c 		mg->k.input = bio->bi_status;
bio              1231 drivers/md/dm-cache-target.c 	struct bio *bio = mg->overwrite_bio;
bio              1232 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio              1234 drivers/md/dm-cache-target.c 	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
bio              1241 drivers/md/dm-cache-target.c 		remap_to_cache(mg->cache, bio, mg->op->cblock);
bio              1243 drivers/md/dm-cache-target.c 		remap_to_origin(mg->cache, bio);
bio              1246 drivers/md/dm-cache-target.c 	accounted_request(mg->cache, bio);
bio              1517 drivers/md/dm-cache-target.c static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
bio              1529 drivers/md/dm-cache-target.c 	mg->overwrite_bio = bio;
bio              1531 drivers/md/dm-cache-target.c 	if (!bio)
bio              1644 drivers/md/dm-cache-target.c 			    dm_oblock_t oblock, struct bio *bio)
bio              1653 drivers/md/dm-cache-target.c 	mg->overwrite_bio = bio;
bio              1681 drivers/md/dm-cache-target.c static void inc_hit_counter(struct cache *cache, struct bio *bio)
bio              1683 drivers/md/dm-cache-target.c 	atomic_inc(bio_data_dir(bio) == READ ?
bio              1687 drivers/md/dm-cache-target.c static void inc_miss_counter(struct cache *cache, struct bio *bio)
bio              1689 drivers/md/dm-cache-target.c 	atomic_inc(bio_data_dir(bio) == READ ?
bio              1695 drivers/md/dm-cache-target.c static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bio              1704 drivers/md/dm-cache-target.c 	rb = bio_detain_shared(cache, block, bio);
bio              1716 drivers/md/dm-cache-target.c 	data_dir = bio_data_dir(bio);
bio              1718 drivers/md/dm-cache-target.c 	if (optimisable_bio(cache, bio, block)) {
bio              1725 drivers/md/dm-cache-target.c 			bio_io_error(bio);
bio              1730 drivers/md/dm-cache-target.c 			bio_drop_shared_lock(cache, bio);
bio              1732 drivers/md/dm-cache-target.c 			mg_start(cache, op, bio);
bio              1740 drivers/md/dm-cache-target.c 			bio_io_error(bio);
bio              1749 drivers/md/dm-cache-target.c 		struct per_bio_data *pb = get_per_bio_data(bio);
bio              1754 drivers/md/dm-cache-target.c 		inc_miss_counter(cache, bio);
bio              1756 drivers/md/dm-cache-target.c 			accounted_begin(cache, bio);
bio              1757 drivers/md/dm-cache-target.c 			remap_to_origin_clear_discard(cache, bio, block);
bio              1763 drivers/md/dm-cache-target.c 			bio_endio(bio);
bio              1770 drivers/md/dm-cache-target.c 		inc_hit_counter(cache, bio);
bio              1777 drivers/md/dm-cache-target.c 			if (bio_data_dir(bio) == WRITE) {
bio              1778 drivers/md/dm-cache-target.c 				bio_drop_shared_lock(cache, bio);
bio              1780 drivers/md/dm-cache-target.c 				invalidate_start(cache, cblock, block, bio);
bio              1782 drivers/md/dm-cache-target.c 				remap_to_origin_clear_discard(cache, bio, block);
bio              1784 drivers/md/dm-cache-target.c 			if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
bio              1786 drivers/md/dm-cache-target.c 				remap_to_origin_and_cache(cache, bio, block, cblock);
bio              1787 drivers/md/dm-cache-target.c 				accounted_begin(cache, bio);
bio              1789 drivers/md/dm-cache-target.c 				remap_to_cache_dirty(cache, bio, block, cblock);
bio              1796 drivers/md/dm-cache-target.c 	if (bio->bi_opf & REQ_FUA) {
bio              1801 drivers/md/dm-cache-target.c 		accounted_complete(cache, bio);
bio              1802 drivers/md/dm-cache-target.c 		issue_after_commit(&cache->committer, bio);
bio              1810 drivers/md/dm-cache-target.c static bool process_bio(struct cache *cache, struct bio *bio)
bio              1814 drivers/md/dm-cache-target.c 	if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
bio              1815 drivers/md/dm-cache-target.c 		generic_make_request(bio);
bio              1853 drivers/md/dm-cache-target.c static bool process_flush_bio(struct cache *cache, struct bio *bio)
bio              1855 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio              1858 drivers/md/dm-cache-target.c 		remap_to_origin(cache, bio);
bio              1860 drivers/md/dm-cache-target.c 		remap_to_cache(cache, bio, 0);
bio              1862 drivers/md/dm-cache-target.c 	issue_after_commit(&cache->committer, bio);
bio              1866 drivers/md/dm-cache-target.c static bool process_discard_bio(struct cache *cache, struct bio *bio)
bio              1873 drivers/md/dm-cache-target.c 	calc_discard_block_range(cache, bio, &b, &e);
bio              1880 drivers/md/dm-cache-target.c 		remap_to_origin(cache, bio);
bio              1881 drivers/md/dm-cache-target.c 		generic_make_request(bio);
bio              1883 drivers/md/dm-cache-target.c 		bio_endio(bio);
bio              1895 drivers/md/dm-cache-target.c 	struct bio *bio;
bio              1904 drivers/md/dm-cache-target.c 	while ((bio = bio_list_pop(&bios))) {
bio              1905 drivers/md/dm-cache-target.c 		if (bio->bi_opf & REQ_PREFLUSH)
bio              1906 drivers/md/dm-cache-target.c 			commit_needed = process_flush_bio(cache, bio) || commit_needed;
bio              1908 drivers/md/dm-cache-target.c 		else if (bio_op(bio) == REQ_OP_DISCARD)
bio              1909 drivers/md/dm-cache-target.c 			commit_needed = process_discard_bio(cache, bio) || commit_needed;
bio              1912 drivers/md/dm-cache-target.c 			commit_needed = process_bio(cache, bio) || commit_needed;
bio              1925 drivers/md/dm-cache-target.c 	struct bio *bio;
bio              1932 drivers/md/dm-cache-target.c 	while ((bio = bio_list_pop(&bios))) {
bio              1933 drivers/md/dm-cache-target.c 		bio->bi_status = BLK_STS_DM_REQUEUE;
bio              1934 drivers/md/dm-cache-target.c 		bio_endio(bio);
bio              2721 drivers/md/dm-cache-target.c static int cache_map(struct dm_target *ti, struct bio *bio)
bio              2727 drivers/md/dm-cache-target.c 	dm_oblock_t block = get_bio_block(cache, bio);
bio              2729 drivers/md/dm-cache-target.c 	init_per_bio_data(bio);
bio              2736 drivers/md/dm-cache-target.c 		remap_to_origin(cache, bio);
bio              2737 drivers/md/dm-cache-target.c 		accounted_begin(cache, bio);
bio              2741 drivers/md/dm-cache-target.c 	if (discard_or_flush(bio)) {
bio              2742 drivers/md/dm-cache-target.c 		defer_bio(cache, bio);
bio              2746 drivers/md/dm-cache-target.c 	r = map_bio(cache, bio, block, &commit_needed);
bio              2753 drivers/md/dm-cache-target.c static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
bio              2757 drivers/md/dm-cache-target.c 	struct per_bio_data *pb = get_per_bio_data(bio);
bio              2767 drivers/md/dm-cache-target.c 	bio_drop_shared_lock(cache, bio);
bio              2768 drivers/md/dm-cache-target.c 	accounted_complete(cache, bio);
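
Note: dm-cache above funnels bios it cannot service inline into cache->deferred_bios under a spinlock and drains them from a worker (defer_bio() and the bio_list_pop() loops). The list handoff reduced to its essentials; struct my_cache and the my_* names are illustrative.

#include <linux/bio.h>
#include <linux/spinlock.h>

struct my_cache {
	spinlock_t	lock;
	struct bio_list	deferred_bios;
};

static void my_defer_bio(struct my_cache *c, struct bio *bio)
{
	spin_lock_irq(&c->lock);
	bio_list_add(&c->deferred_bios, bio);
	spin_unlock_irq(&c->lock);
	/* real code wakes the worker here */
}

static void my_process_deferred(struct my_cache *c)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	/* steal the whole list so the lock is held only briefly */
	spin_lock_irq(&c->lock);
	bio_list_merge(&bios, &c->deferred_bios);
	bio_list_init(&c->deferred_bios);
	spin_unlock_irq(&c->lock);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);	/* stand-in for per-bio handling */
}
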
bio                93 drivers/md/dm-clone-target.c 	struct bio flush_bio;
bio               266 drivers/md/dm-clone-target.c static inline void remap_to_source(struct clone *clone, struct bio *bio)
bio               268 drivers/md/dm-clone-target.c 	bio_set_dev(bio, clone->source_dev->bdev);
bio               271 drivers/md/dm-clone-target.c static inline void remap_to_dest(struct clone *clone, struct bio *bio)
bio               273 drivers/md/dm-clone-target.c 	bio_set_dev(bio, clone->dest_dev->bdev);
bio               276 drivers/md/dm-clone-target.c static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
bio               278 drivers/md/dm-clone-target.c 	return op_is_flush(bio->bi_opf) &&
bio               289 drivers/md/dm-clone-target.c static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
bio               291 drivers/md/dm-clone-target.c 	return (bio->bi_iter.bi_sector >> clone->region_shift);
bio               295 drivers/md/dm-clone-target.c static void bio_region_range(struct clone *clone, struct bio *bio,
bio               300 drivers/md/dm-clone-target.c 	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
bio               301 drivers/md/dm-clone-target.c 	end = bio_end_sector(bio) >> clone->region_shift;
bio               310 drivers/md/dm-clone-target.c static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
bio               312 drivers/md/dm-clone-target.c 	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
bio               317 drivers/md/dm-clone-target.c 	struct bio *bio;
bio               319 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(bios))) {
bio               320 drivers/md/dm-clone-target.c 		bio->bi_status = status;
bio               321 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               327 drivers/md/dm-clone-target.c 	struct bio *bio;
bio               332 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(bios)))
bio               333 drivers/md/dm-clone-target.c 		generic_make_request(bio);
bio               346 drivers/md/dm-clone-target.c static void issue_bio(struct clone *clone, struct bio *bio)
bio               348 drivers/md/dm-clone-target.c 	if (!bio_triggers_commit(clone, bio)) {
bio               349 drivers/md/dm-clone-target.c 		generic_make_request(bio);
bio               358 drivers/md/dm-clone-target.c 		bio_io_error(bio);
bio               367 drivers/md/dm-clone-target.c 	bio_list_add(&clone->deferred_flush_bios, bio);
bio               379 drivers/md/dm-clone-target.c static void remap_and_issue(struct clone *clone, struct bio *bio)
bio               381 drivers/md/dm-clone-target.c 	remap_to_dest(clone, bio);
bio               382 drivers/md/dm-clone-target.c 	issue_bio(clone, bio);
bio               394 drivers/md/dm-clone-target.c 	struct bio *bio;
bio               402 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(bios))) {
bio               403 drivers/md/dm-clone-target.c 		if (bio_triggers_commit(clone, bio))
bio               404 drivers/md/dm-clone-target.c 			bio_list_add(&flush_bios, bio);
bio               406 drivers/md/dm-clone-target.c 			bio_list_add(&normal_bios, bio);
bio               417 drivers/md/dm-clone-target.c static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
bio               431 drivers/md/dm-clone-target.c 	if (!(bio->bi_opf & REQ_FUA)) {
bio               432 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               441 drivers/md/dm-clone-target.c 		bio_io_error(bio);
bio               450 drivers/md/dm-clone-target.c 	bio_list_add(&clone->deferred_flush_completions, bio);
bio               456 drivers/md/dm-clone-target.c static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
bio               458 drivers/md/dm-clone-target.c 	bio->bi_iter.bi_sector = sector;
bio               459 drivers/md/dm-clone-target.c 	bio->bi_iter.bi_size = to_bytes(len);
bio               462 drivers/md/dm-clone-target.c static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
bio               472 drivers/md/dm-clone-target.c 		remap_to_dest(clone, bio);
bio               473 drivers/md/dm-clone-target.c 		bio_region_range(clone, bio, &rs, &nr_regions);
bio               474 drivers/md/dm-clone-target.c 		trim_bio(bio, region_to_sector(clone, rs),
bio               476 drivers/md/dm-clone-target.c 		generic_make_request(bio);
bio               478 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               481 drivers/md/dm-clone-target.c static void process_discard_bio(struct clone *clone, struct bio *bio)
bio               485 drivers/md/dm-clone-target.c 	bio_region_range(clone, bio, &rs, &nr_regions);
bio               487 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               496 drivers/md/dm-clone-target.c 		      (unsigned long long)bio->bi_iter.bi_sector,
bio               497 drivers/md/dm-clone-target.c 		      bio_sectors(bio));
bio               498 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               507 drivers/md/dm-clone-target.c 		complete_discard_bio(clone, bio, true);
bio               517 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio               525 drivers/md/dm-clone-target.c 	bio_list_add(&clone->deferred_discard_bios, bio);
bio               540 drivers/md/dm-clone-target.c 	struct bio *overwrite_bio;
bio               838 drivers/md/dm-clone-target.c static void overwrite_endio(struct bio *bio)
bio               840 drivers/md/dm-clone-target.c 	struct dm_clone_region_hydration *hd = bio->bi_private;
bio               842 drivers/md/dm-clone-target.c 	bio->bi_end_io = hd->overwrite_bio_end_io;
bio               843 drivers/md/dm-clone-target.c 	hd->status = bio->bi_status;
bio               848 drivers/md/dm-clone-target.c static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
bio               855 drivers/md/dm-clone-target.c 	hd->overwrite_bio = bio;
bio               856 drivers/md/dm-clone-target.c 	hd->overwrite_bio_end_io = bio->bi_end_io;
bio               858 drivers/md/dm-clone-target.c 	bio->bi_end_io = overwrite_endio;
bio               859 drivers/md/dm-clone-target.c 	bio->bi_private = hd;
bio               862 drivers/md/dm-clone-target.c 	generic_make_request(bio);
bio               875 drivers/md/dm-clone-target.c static void hydrate_bio_region(struct clone *clone, struct bio *bio)
bio               882 drivers/md/dm-clone-target.c 	region_nr = bio_to_region(clone, bio);
bio               890 drivers/md/dm-clone-target.c 		bio_list_add(&hd->deferred_bios, bio);
bio               898 drivers/md/dm-clone-target.c 		issue_bio(clone, bio);
bio               917 drivers/md/dm-clone-target.c 		issue_bio(clone, bio);
bio               924 drivers/md/dm-clone-target.c 		bio_list_add(&hd2->deferred_bios, bio);
bio               939 drivers/md/dm-clone-target.c 		bio_io_error(bio);
bio               950 drivers/md/dm-clone-target.c 	if (is_overwrite_bio(clone, bio)) {
bio               952 drivers/md/dm-clone-target.c 		hydration_overwrite(hd, bio);
bio               954 drivers/md/dm-clone-target.c 		bio_list_add(&hd->deferred_bios, bio);
bio              1185 drivers/md/dm-clone-target.c 	struct bio *bio;
bio              1202 drivers/md/dm-clone-target.c 	bio_list_for_each(bio, &discards) {
bio              1203 drivers/md/dm-clone-target.c 		bio_region_range(clone, bio, &rs, &nr_regions);
bio              1215 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(&discards)))
bio              1216 drivers/md/dm-clone-target.c 		complete_discard_bio(clone, bio, r == 0);
bio              1237 drivers/md/dm-clone-target.c 	struct bio *bio;
bio              1261 drivers/md/dm-clone-target.c 		while ((bio = bio_list_pop(&bios)))
bio              1262 drivers/md/dm-clone-target.c 			bio_io_error(bio);
bio              1269 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(&bio_completions)))
bio              1270 drivers/md/dm-clone-target.c 		bio_endio(bio);
bio              1272 drivers/md/dm-clone-target.c 	while ((bio = bio_list_pop(&bios))) {
bio              1273 drivers/md/dm-clone-target.c 		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
bio              1278 drivers/md/dm-clone-target.c 			bio_endio(bio);
bio              1280 drivers/md/dm-clone-target.c 			generic_make_request(bio);
bio              1325 drivers/md/dm-clone-target.c static int clone_map(struct dm_target *ti, struct bio *bio)
bio              1342 drivers/md/dm-clone-target.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio              1343 drivers/md/dm-clone-target.c 		remap_and_issue(clone, bio);
bio              1347 drivers/md/dm-clone-target.c 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio              1354 drivers/md/dm-clone-target.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio              1355 drivers/md/dm-clone-target.c 		process_discard_bio(clone, bio);
bio              1369 drivers/md/dm-clone-target.c 	region_nr = bio_to_region(clone, bio);
bio              1371 drivers/md/dm-clone-target.c 		remap_and_issue(clone, bio);
bio              1373 drivers/md/dm-clone-target.c 	} else if (bio_data_dir(bio) == READ) {
bio              1374 drivers/md/dm-clone-target.c 		remap_to_source(clone, bio);
bio              1378 drivers/md/dm-clone-target.c 	remap_to_dest(clone, bio);
bio              1379 drivers/md/dm-clone-target.c 	hydrate_bio_region(clone, bio);
bio              1384 drivers/md/dm-clone-target.c static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
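
Note: bio_region_range() and trim_bio() above turn a discard bio's sector span into whole regions and shrink the bio to the aligned part. The arithmetic spelled out, assuming (as the dm-clone entries suggest) a power-of-two region size in sectors with region_shift = ilog2(region_size); the my_* names are illustrative.

#include <linux/bio.h>

static void my_bio_region_range(struct bio *bio, sector_t region_size,
				unsigned int region_shift,
				unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end = bio_end_sector(bio) >> region_shift;

	/* round the start up: a partially covered first region doesn't count */
	*rs = (bio->bi_iter.bi_sector + region_size - 1) >> region_shift;
	*nr_regions = (end <= *rs) ? 0 : end - *rs;
}

/* Shrink @bio to @len sectors starting at @sector, as trim_bio() does. */
static void my_trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = len << 9;
}
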
bio                48 drivers/md/dm-crypt.c 	struct bio *bio_in;
bio                49 drivers/md/dm-crypt.c 	struct bio *bio_out;
bio                66 drivers/md/dm-crypt.c 	struct bio *base_bio;
bio               221 drivers/md/dm-crypt.c static void clone_init(struct dm_crypt_io *, struct bio *);
bio               819 drivers/md/dm-crypt.c static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
bio               825 drivers/md/dm-crypt.c 	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
bio               828 drivers/md/dm-crypt.c 	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
bio               832 drivers/md/dm-crypt.c 	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
bio               837 drivers/md/dm-crypt.c 	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
bio               894 drivers/md/dm-crypt.c 			       struct bio *bio_out, struct bio *bio_in,
bio              1185 drivers/md/dm-crypt.c 				    struct skcipher_request *req, struct bio *base_bio)
bio              1194 drivers/md/dm-crypt.c 				struct aead_request *req, struct bio *base_bio)
bio              1202 drivers/md/dm-crypt.c static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
bio              1277 drivers/md/dm-crypt.c static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
bio              1296 drivers/md/dm-crypt.c static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
bio              1299 drivers/md/dm-crypt.c 	struct bio *clone;
bio              1346 drivers/md/dm-crypt.c static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio              1358 drivers/md/dm-crypt.c 			  struct bio *bio, sector_t sector)
bio              1361 drivers/md/dm-crypt.c 	io->base_bio = bio;
bio              1382 drivers/md/dm-crypt.c 	struct bio *base_bio = io->base_bio;
bio              1417 drivers/md/dm-crypt.c static void crypt_endio(struct bio *clone)
bio              1444 drivers/md/dm-crypt.c static void clone_init(struct dm_crypt_io *io, struct bio *clone)
bio              1457 drivers/md/dm-crypt.c 	struct bio *clone;
bio              1504 drivers/md/dm-crypt.c 	struct bio *clone = io->ctx.bio_out;
bio              1565 drivers/md/dm-crypt.c 	struct bio *clone = io->ctx.bio_out;
bio              1609 drivers/md/dm-crypt.c 	struct bio *clone;
bio              2748 drivers/md/dm-crypt.c static int crypt_map(struct dm_target *ti, struct bio *bio)
bio              2758 drivers/md/dm-crypt.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio              2759 drivers/md/dm-crypt.c 	    bio_op(bio) == REQ_OP_DISCARD)) {
bio              2760 drivers/md/dm-crypt.c 		bio_set_dev(bio, cc->dev->bdev);
bio              2761 drivers/md/dm-crypt.c 		if (bio_sectors(bio))
bio              2762 drivers/md/dm-crypt.c 			bio->bi_iter.bi_sector = cc->start +
bio              2763 drivers/md/dm-crypt.c 				dm_target_offset(ti, bio->bi_iter.bi_sector);
bio              2770 drivers/md/dm-crypt.c 	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
bio              2771 drivers/md/dm-crypt.c 	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
bio              2772 drivers/md/dm-crypt.c 		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
bio              2778 drivers/md/dm-crypt.c 	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
bio              2781 drivers/md/dm-crypt.c 	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
bio              2784 drivers/md/dm-crypt.c 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
bio              2785 drivers/md/dm-crypt.c 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
bio              2788 drivers/md/dm-crypt.c 		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
bio              2793 drivers/md/dm-crypt.c 			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
bio              2794 drivers/md/dm-crypt.c 				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
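
Note: crypt_map() above caps how much of an oversized bio it accepts with dm_accept_partial_bio(), letting the DM core resubmit the remainder as a fresh bio. The minimal shape of that pattern; struct my_crypt_like and my_map() are hypothetical, the limit mirrors the BIO_MAX_PAGES bound quoted above.

#include <linux/device-mapper.h>

#define MY_MAX_SECTORS	((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)

struct my_crypt_like {
	struct dm_dev	*dev;
	sector_t	start;
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_crypt_like *c = ti->private;

	/* take only what fits; DM splits off and requeues the rest */
	if (bio_sectors(bio) > MY_MAX_SECTORS)
		dm_accept_partial_bio(bio, MY_MAX_SECTORS);

	bio_set_dev(bio, c->dev->bdev);
	bio->bi_iter.bi_sector = c->start +
		dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}
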
bio                68 drivers/md/dm-delay.c static void flush_bios(struct bio *bio)
bio                70 drivers/md/dm-delay.c 	struct bio *n;
bio                72 drivers/md/dm-delay.c 	while (bio) {
bio                73 drivers/md/dm-delay.c 		n = bio->bi_next;
bio                74 drivers/md/dm-delay.c 		bio->bi_next = NULL;
bio                75 drivers/md/dm-delay.c 		generic_make_request(bio);
bio                76 drivers/md/dm-delay.c 		bio = n;
bio                80 drivers/md/dm-delay.c static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
bio                90 drivers/md/dm-delay.c 			struct bio *bio = dm_bio_from_per_bio_data(delayed,
bio                93 drivers/md/dm-delay.c 			bio_list_add(&flush_bios, bio);
bio               243 drivers/md/dm-delay.c static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
bio               251 drivers/md/dm-delay.c 	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
bio               282 drivers/md/dm-delay.c static int delay_map(struct dm_target *ti, struct bio *bio)
bio               286 drivers/md/dm-delay.c 	struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
bio               288 drivers/md/dm-delay.c 	if (bio_data_dir(bio) == WRITE) {
bio               289 drivers/md/dm-delay.c 		if (unlikely(bio->bi_opf & REQ_PREFLUSH))
bio               297 drivers/md/dm-delay.c 	bio_set_dev(bio, c->dev->bdev);
bio               298 drivers/md/dm-delay.c 	if (bio_sectors(bio))
bio               299 drivers/md/dm-delay.c 		bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
bio               301 drivers/md/dm-delay.c 	return delay_bio(dc, c, bio);
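
Note: dm-delay above parks bios by keeping only per-bio data on its timer list and later recovers the bio with dm_bio_from_per_bio_data() (see flush_delayed_bios()). A sketch of that round trip; struct my_info and the queueing details are illustrative.

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

struct my_info {
	unsigned long expires;	/* when to release the bio */
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_info *info = dm_per_bio_data(bio, sizeof(*info));

	info->expires = jiffies + msecs_to_jiffies(100);
	/* real code links @info on a list scanned by a delayed work item */
	return DM_MAPIO_SUBMITTED;	/* we'll issue the bio ourselves later */
}

static void my_release(struct my_info *info)
{
	struct bio *bio = dm_bio_from_per_bio_data(info, sizeof(*info));

	generic_make_request(bio);	/* the delay has expired */
}
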
bio               209 drivers/md/dm-dust.c static int dust_map(struct dm_target *ti, struct bio *bio)
bio               214 drivers/md/dm-dust.c 	bio_set_dev(bio, dd->dev->bdev);
bio               215 drivers/md/dm-dust.c 	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
bio               217 drivers/md/dm-dust.c 	if (bio_data_dir(bio) == READ)
bio               218 drivers/md/dm-dust.c 		ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
bio               220 drivers/md/dm-dust.c 		ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
bio              1182 drivers/md/dm-era-target.c static dm_block_t get_block(struct era *era, struct bio *bio)
bio              1184 drivers/md/dm-era-target.c 	sector_t block_nr = bio->bi_iter.bi_sector;
bio              1194 drivers/md/dm-era-target.c static void remap_to_origin(struct era *era, struct bio *bio)
bio              1196 drivers/md/dm-era-target.c 	bio_set_dev(bio, era->origin_dev->bdev);
bio              1228 drivers/md/dm-era-target.c 	struct bio *bio;
bio              1240 drivers/md/dm-era-target.c 	while ((bio = bio_list_pop(&deferred_bios))) {
bio              1243 drivers/md/dm-era-target.c 					  get_block(era, bio));
bio              1254 drivers/md/dm-era-target.c 		bio_list_add(&marked_bios, bio);
bio              1264 drivers/md/dm-era-target.c 		while ((bio = bio_list_pop(&marked_bios)))
bio              1265 drivers/md/dm-era-target.c 			bio_io_error(bio);
bio              1267 drivers/md/dm-era-target.c 		while ((bio = bio_list_pop(&marked_bios)))
bio              1268 drivers/md/dm-era-target.c 			generic_make_request(bio);
bio              1317 drivers/md/dm-era-target.c static void defer_bio(struct era *era, struct bio *bio)
bio              1320 drivers/md/dm-era-target.c 	bio_list_add(&era->deferred_bios, bio);
bio              1528 drivers/md/dm-era-target.c static int era_map(struct dm_target *ti, struct bio *bio)
bio              1531 drivers/md/dm-era-target.c 	dm_block_t block = get_block(era, bio);
bio              1538 drivers/md/dm-era-target.c 	remap_to_origin(era, bio);
bio              1543 drivers/md/dm-era-target.c 	if (!(bio->bi_opf & REQ_PREFLUSH) &&
bio              1544 drivers/md/dm-era-target.c 	    (bio_data_dir(bio) == WRITE) &&
bio              1546 drivers/md/dm-era-target.c 		defer_bio(era, bio);
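
era_map() above remaps everything to the origin but routes data writes through a deferred path so the era metadata can be marked first. A sketch of that gating, with stand-ins for the bitset check and the worker hand-off:

#include <linux/device-mapper.h>

struct era_like {                       /* hypothetical context */
        struct dm_dev *origin;
};

/* Stand-ins for the era bitset test and deferral (not in the excerpt). */
static bool block_marked(struct era_like *era, struct bio *bio)
{
        return true;
}
static void defer_to_worker(struct era_like *era, struct bio *bio)
{
}

static int era_style_map(struct dm_target *ti, struct bio *bio)
{
        struct era_like *era = ti->private;

        bio_set_dev(bio, era->origin->bdev);

        /* Only data writes whose era bit is not yet set go the slow way. */
        if (!(bio->bi_opf & REQ_PREFLUSH) &&
            bio_data_dir(bio) == WRITE && !block_marked(era, bio)) {
                defer_to_worker(era, bio);
                return DM_MAPIO_SUBMITTED;
        }
        return DM_MAPIO_REMAPPED;
}
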
bio                18 drivers/md/dm-flakey.c #define all_corrupt_bio_flags_match(bio, fc)	\
bio                19 drivers/md/dm-flakey.c 	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
bio               278 drivers/md/dm-flakey.c static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio               282 drivers/md/dm-flakey.c 	bio_set_dev(bio, fc->dev->bdev);
bio               283 drivers/md/dm-flakey.c 	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio               284 drivers/md/dm-flakey.c 		bio->bi_iter.bi_sector =
bio               285 drivers/md/dm-flakey.c 			flakey_map_sector(ti, bio->bi_iter.bi_sector);
bio               288 drivers/md/dm-flakey.c static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
bio               295 drivers/md/dm-flakey.c 	if (!bio_has_data(bio))
bio               302 drivers/md/dm-flakey.c 	bio_for_each_segment(bvec, bio, iter) {
bio               303 drivers/md/dm-flakey.c 		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
bio               304 drivers/md/dm-flakey.c 			char *segment = (page_address(bio_iter_page(bio, iter))
bio               305 drivers/md/dm-flakey.c 					 + bio_iter_offset(bio, iter));
bio               309 drivers/md/dm-flakey.c 				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
bio               310 drivers/md/dm-flakey.c 				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
bio               311 drivers/md/dm-flakey.c 				(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
bio               314 drivers/md/dm-flakey.c 		corrupt_bio_byte -= bio_iter_len(bio, iter);
bio               318 drivers/md/dm-flakey.c static int flakey_map(struct dm_target *ti, struct bio *bio)
bio               322 drivers/md/dm-flakey.c 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
bio               326 drivers/md/dm-flakey.c 	if (bio_op(bio) == REQ_OP_ZONE_RESET)
bio               341 drivers/md/dm-flakey.c 		if (bio_data_dir(bio) == READ) {
bio               352 drivers/md/dm-flakey.c 			bio_endio(bio);
bio               356 drivers/md/dm-flakey.c 			bio_io_error(bio);
bio               364 drivers/md/dm-flakey.c 			if (all_corrupt_bio_flags_match(bio, fc))
bio               365 drivers/md/dm-flakey.c 				corrupt_bio_data(bio, fc);
bio               376 drivers/md/dm-flakey.c 	flakey_map_bio(ti, bio);
bio               381 drivers/md/dm-flakey.c static int flakey_end_io(struct dm_target *ti, struct bio *bio,
bio               385 drivers/md/dm-flakey.c 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
bio               387 drivers/md/dm-flakey.c 	if (bio_op(bio) == REQ_OP_ZONE_RESET)
bio               390 drivers/md/dm-flakey.c 	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
bio               392 drivers/md/dm-flakey.c 		    all_corrupt_bio_flags_match(bio, fc)) {
bio               396 drivers/md/dm-flakey.c 			corrupt_bio_data(bio, fc);
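
corrupt_bio_data() above walks the bio's segments until it reaches the segment containing the target byte. A sketch of that traversal, assuming lowmem pages so page_address() is valid (as dm-flakey itself assumes):

#include <linux/bio.h>
#include <linux/mm.h>

static void poke_byte(struct bio *bio, unsigned target, unsigned char val)
{
        struct bvec_iter iter;
        struct bio_vec bvec;

        if (!bio_has_data(bio))
                return;

        bio_for_each_segment(bvec, bio, iter) {
                if (bio_iter_len(bio, iter) > target) {
                        /* Byte lives in this segment: poke it and stop. */
                        char *seg = page_address(bio_iter_page(bio, iter)) +
                                    bio_iter_offset(bio, iter);
                        seg[target] = val;
                        break;
                }
                /* Skip past this segment and keep counting. */
                target -= bio_iter_len(bio, iter);
        }
}
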
bio              1381 drivers/md/dm-integrity.c 	struct bio *bio;
bio              1385 drivers/md/dm-integrity.c 	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio              1386 drivers/md/dm-integrity.c 	bio_list_add(&ic->flush_bio_list, bio);
bio              1392 drivers/md/dm-integrity.c static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
bio              1395 drivers/md/dm-integrity.c 	if (unlikely(r) && !bio->bi_status)
bio              1396 drivers/md/dm-integrity.c 		bio->bi_status = errno_to_blk_status(r);
bio              1397 drivers/md/dm-integrity.c 	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
bio              1400 drivers/md/dm-integrity.c 		bio_list_add(&ic->synchronous_bios, bio);
bio              1405 drivers/md/dm-integrity.c 	bio_endio(bio);
bio              1410 drivers/md/dm-integrity.c 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio              1412 drivers/md/dm-integrity.c 	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
bio              1415 drivers/md/dm-integrity.c 		do_endio(ic, bio);
bio              1422 drivers/md/dm-integrity.c 		struct bio *bio;
bio              1429 drivers/md/dm-integrity.c 		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio              1431 drivers/md/dm-integrity.c 		if (unlikely(dio->bi_status) && !bio->bi_status)
bio              1432 drivers/md/dm-integrity.c 			bio->bi_status = dio->bi_status;
bio              1433 drivers/md/dm-integrity.c 		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
bio              1435 drivers/md/dm-integrity.c 			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
bio              1444 drivers/md/dm-integrity.c static void integrity_end_io(struct bio *bio)
bio              1446 drivers/md/dm-integrity.c 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio              1448 drivers/md/dm-integrity.c 	dm_bio_restore(&dio->bio_details, bio);
bio              1449 drivers/md/dm-integrity.c 	if (bio->bi_integrity)
bio              1450 drivers/md/dm-integrity.c 		bio->bi_opf |= REQ_INTEGRITY;
bio              1514 drivers/md/dm-integrity.c 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio              1535 drivers/md/dm-integrity.c 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
bio              1613 drivers/md/dm-integrity.c static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
bio              1616 drivers/md/dm-integrity.c 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio              1624 drivers/md/dm-integrity.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
bio              1629 drivers/md/dm-integrity.c 	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio              1630 drivers/md/dm-integrity.c 	dio->write = bio_op(bio) == REQ_OP_WRITE;
bio              1631 drivers/md/dm-integrity.c 	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
bio              1637 drivers/md/dm-integrity.c 		bio->bi_opf &= ~REQ_FUA;
bio              1639 drivers/md/dm-integrity.c 	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
bio              1641 drivers/md/dm-integrity.c 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
bio              1645 drivers/md/dm-integrity.c 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
bio              1648 drivers/md/dm-integrity.c 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
bio              1655 drivers/md/dm-integrity.c 		bio_for_each_segment(bv, bio, iter) {
bio              1664 drivers/md/dm-integrity.c 	bip = bio_integrity(bio);
bio              1667 drivers/md/dm-integrity.c 			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
bio              1690 drivers/md/dm-integrity.c 	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
bio              1696 drivers/md/dm-integrity.c static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
bio              1706 drivers/md/dm-integrity.c 		struct bio_vec bv = bio_iovec(bio);
bio              1712 drivers/md/dm-integrity.c 		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
bio              1758 drivers/md/dm-integrity.c 				struct bio_integrity_payload *bip = bio_integrity(bio);
bio              1835 drivers/md/dm-integrity.c 	if (unlikely(bio->bi_iter.bi_size)) {
bio              1850 drivers/md/dm-integrity.c 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio              1867 drivers/md/dm-integrity.c 		do_endio(ic, bio);
bio              1870 drivers/md/dm-integrity.c 	dio->range.n_sectors = bio_sectors(bio);
bio              1983 drivers/md/dm-integrity.c 			bio_list_add(&bbs->bio_queue, bio);
bio              1998 drivers/md/dm-integrity.c 	dm_bio_record(&dio->bio_details, bio);
bio              1999 drivers/md/dm-integrity.c 	bio_set_dev(bio, ic->dev->bdev);
bio              2000 drivers/md/dm-integrity.c 	bio->bi_integrity = NULL;
bio              2001 drivers/md/dm-integrity.c 	bio->bi_opf &= ~REQ_INTEGRITY;
bio              2002 drivers/md/dm-integrity.c 	bio->bi_end_io = integrity_end_io;
bio              2003 drivers/md/dm-integrity.c 	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
bio              2005 drivers/md/dm-integrity.c 	generic_make_request(bio);
bio              2018 drivers/md/dm-integrity.c 		if (likely(!bio->bi_status))
bio              2032 drivers/md/dm-integrity.c 	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
bio              2072 drivers/md/dm-integrity.c 	struct bio *flushes;
bio              2125 drivers/md/dm-integrity.c 		struct bio *next = flushes->bi_next;
bio              2467 drivers/md/dm-integrity.c 	struct bio *bio;
bio              2478 drivers/md/dm-integrity.c 	while ((bio = bio_list_pop(&bio_queue))) {
bio              2481 drivers/md/dm-integrity.c 		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio              2491 drivers/md/dm-integrity.c 			bio_list_add(&waiting, bio);
bio              2502 drivers/md/dm-integrity.c 	while ((bio = bio_list_pop(&waiting))) {
bio              2503 drivers/md/dm-integrity.c 		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio              2521 drivers/md/dm-integrity.c 	struct bio *bio;
bio              2551 drivers/md/dm-integrity.c 	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
bio              2552 drivers/md/dm-integrity.c 		bio_endio(bio);
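
A recurring pattern in the dm-integrity lines above (and in dm-thin's save_and_set_endio() later in this listing) is hooking a bio's completion: stash the original bi_end_io, install a private one, and re-arm the original once the driver's bookkeeping is done. A minimal sketch, assuming per-bio space was reserved via ti->per_io_data_size in the constructor:

#include <linux/device-mapper.h>

struct hooked_io {                      /* hypothetical per-bio state */
        bio_end_io_t *saved_end_io;
};

static void hooked_end_io(struct bio *bio)
{
        struct hooked_io *h = dm_per_bio_data(bio, sizeof(*h));

        bio->bi_end_io = h->saved_end_io;       /* re-arm original handler */
        /* ...driver bookkeeping (verify tags, update journal)... */
        bio_endio(bio);                         /* completes for real now */
}

static void hook_bio(struct bio *bio)
{
        struct hooked_io *h = dm_per_bio_data(bio, sizeof(*h));

        h->saved_end_io = bio->bi_end_io;
        bio->bi_end_io = hooked_end_io;
}
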
bio                90 drivers/md/dm-io.c static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
bio                98 drivers/md/dm-io.c 	bio->bi_private = (void *)((unsigned long)io | region);
bio               101 drivers/md/dm-io.c static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
bio               104 drivers/md/dm-io.c 	unsigned long val = (unsigned long)bio->bi_private;
bio               137 drivers/md/dm-io.c static void endio(struct bio *bio)
bio               143 drivers/md/dm-io.c 	if (bio->bi_status && bio_data_dir(bio) == READ)
bio               144 drivers/md/dm-io.c 		zero_fill_bio(bio);
bio               149 drivers/md/dm-io.c 	retrieve_io_and_region_from_bio(bio, &io, &region);
bio               151 drivers/md/dm-io.c 	error = bio->bi_status;
bio               152 drivers/md/dm-io.c 	bio_put(bio);
bio               230 drivers/md/dm-io.c static void bio_dp_init(struct dpages *dp, struct bio *bio)
bio               239 drivers/md/dm-io.c 	dp->context_ptr = bio->bi_io_vec;
bio               240 drivers/md/dm-io.c 	dp->context_bi = bio->bi_iter;
bio               300 drivers/md/dm-io.c 	struct bio *bio;
bio               348 drivers/md/dm-io.c 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
bio               349 drivers/md/dm-io.c 		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio               350 drivers/md/dm-io.c 		bio_set_dev(bio, where->bdev);
bio               351 drivers/md/dm-io.c 		bio->bi_end_io = endio;
bio               352 drivers/md/dm-io.c 		bio_set_op_attrs(bio, op, op_flags);
bio               353 drivers/md/dm-io.c 		store_io_and_region_in_bio(bio, io, region);
bio               357 drivers/md/dm-io.c 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
bio               364 drivers/md/dm-io.c 			bio_add_page(bio, page, logical_block_size, offset);
bio               366 drivers/md/dm-io.c 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
bio               377 drivers/md/dm-io.c 			if (!bio_add_page(bio, page, len, offset))
bio               386 drivers/md/dm-io.c 		submit_bio(bio);
bio               506 drivers/md/dm-io.c 		bio_dp_init(dp, io_req->mem.ptr.bio);
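
store_io_and_region_in_bio() above packs a struct io pointer and a region index into the single bi_private slot, relying on the io allocation being aligned so its low bits are free. A sketch of the packing; the 3-bit width here is illustrative, not dm-io's actual region count:

#include <linux/bio.h>
#include <linux/kernel.h>

#define REGION_BITS 3
#define REGION_MASK ((1UL << REGION_BITS) - 1)

static void pack_io(struct bio *bio, void *io, unsigned region)
{
        BUG_ON((unsigned long)io & REGION_MASK);        /* io must be aligned */
        bio->bi_private = (void *)((unsigned long)io | region);
}

static void unpack_io(struct bio *bio, void **io, unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & ~REGION_MASK);
        *region = val & REGION_MASK;
}
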
bio                88 drivers/md/dm-linear.c static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio                92 drivers/md/dm-linear.c 	bio_set_dev(bio, lc->dev->bdev);
bio                93 drivers/md/dm-linear.c 	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio                94 drivers/md/dm-linear.c 		bio->bi_iter.bi_sector =
bio                95 drivers/md/dm-linear.c 			linear_map_sector(ti, bio->bi_iter.bi_sector);
bio                98 drivers/md/dm-linear.c static int linear_map(struct dm_target *ti, struct bio *bio)
bio               100 drivers/md/dm-linear.c 	linear_map_bio(ti, bio);
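
dm-linear above is the canonical remapping target: point the bio at the backing device and shift its sector. A self-contained sketch of a linear-style target under illustrative names ("lin_example"); the ctr/dtr scaffolding is the standard device-mapper registration shape, not code from this listing:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

struct lin {                            /* mirrors dm-linear's linear_c */
        struct dm_dev *dev;
        sector_t start;
};

/* Table line: <dev_path> <start_sector> */
static int lin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        struct lin *lc;
        unsigned long long start;

        if (argc != 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }
        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }
        if (kstrtoull(argv[1], 10, &start) ||
            dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                          &lc->dev)) {
                kfree(lc);
                ti->error = "Device lookup failed";
                return -EINVAL;
        }
        lc->start = start;
        ti->num_flush_bios = 1;         /* pass flushes through */
        ti->private = lc;
        return 0;
}

static void lin_dtr(struct dm_target *ti)
{
        struct lin *lc = ti->private;

        dm_put_device(ti, lc->dev);
        kfree(lc);
}

static int lin_map(struct dm_target *ti, struct bio *bio)
{
        struct lin *lc = ti->private;

        bio_set_dev(bio, lc->dev->bdev);
        if (bio_sectors(bio))
                bio->bi_iter.bi_sector = lc->start +
                        dm_target_offset(ti, bio->bi_iter.bi_sector);
        return DM_MAPIO_REMAPPED;
}

static struct target_type lin_target = {
        .name    = "lin_example",
        .version = {1, 0, 0},
        .module  = THIS_MODULE,
        .ctr     = lin_ctr,
        .dtr     = lin_dtr,
        .map     = lin_map,
};

static int __init lin_init(void)
{
        return dm_register_target(&lin_target);
}
static void __exit lin_exit(void)
{
        dm_unregister_target(&lin_target);
}
module_init(lin_init);
module_exit(lin_exit);
MODULE_LICENSE("GPL");
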
bio               167 drivers/md/dm-log-writes.c static void log_end_io(struct bio *bio)
bio               169 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = bio->bi_private;
bio               171 drivers/md/dm-log-writes.c 	if (bio->bi_status) {
bio               174 drivers/md/dm-log-writes.c 		DMERR("Error writing log block, error=%d", bio->bi_status);
bio               180 drivers/md/dm-log-writes.c 	bio_free_pages(bio);
bio               182 drivers/md/dm-log-writes.c 	bio_put(bio);
bio               185 drivers/md/dm-log-writes.c static void log_end_super(struct bio *bio)
bio               187 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = bio->bi_private;
bio               190 drivers/md/dm-log-writes.c 	log_end_io(bio);
bio               215 drivers/md/dm-log-writes.c 	struct bio *bio;
bio               220 drivers/md/dm-log-writes.c 	bio = bio_alloc(GFP_KERNEL, 1);
bio               221 drivers/md/dm-log-writes.c 	if (!bio) {
bio               225 drivers/md/dm-log-writes.c 	bio->bi_iter.bi_size = 0;
bio               226 drivers/md/dm-log-writes.c 	bio->bi_iter.bi_sector = sector;
bio               227 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
bio               228 drivers/md/dm-log-writes.c 	bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
bio               230 drivers/md/dm-log-writes.c 	bio->bi_private = lc;
bio               231 drivers/md/dm-log-writes.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               236 drivers/md/dm-log-writes.c 		bio_put(bio);
bio               248 drivers/md/dm-log-writes.c 	ret = bio_add_page(bio, page, lc->sectorsize, 0);
bio               253 drivers/md/dm-log-writes.c 	submit_bio(bio);
bio               256 drivers/md/dm-log-writes.c 	bio_put(bio);
bio               269 drivers/md/dm-log-writes.c 	struct bio *bio;
bio               279 drivers/md/dm-log-writes.c 		bio = bio_alloc(GFP_KERNEL, bio_pages);
bio               280 drivers/md/dm-log-writes.c 		if (!bio) {
bio               285 drivers/md/dm-log-writes.c 		bio->bi_iter.bi_size = 0;
bio               286 drivers/md/dm-log-writes.c 		bio->bi_iter.bi_sector = sector;
bio               287 drivers/md/dm-log-writes.c 		bio_set_dev(bio, lc->logdev->bdev);
bio               288 drivers/md/dm-log-writes.c 		bio->bi_end_io = log_end_io;
bio               289 drivers/md/dm-log-writes.c 		bio->bi_private = lc;
bio               290 drivers/md/dm-log-writes.c 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               308 drivers/md/dm-log-writes.c 			ret = bio_add_page(bio, page, pg_sectorlen, 0);
bio               318 drivers/md/dm-log-writes.c 		submit_bio(bio);
bio               324 drivers/md/dm-log-writes.c 	bio_free_pages(bio);
bio               325 drivers/md/dm-log-writes.c 	bio_put(bio);
bio               334 drivers/md/dm-log-writes.c 	struct bio *bio;
bio               367 drivers/md/dm-log-writes.c 	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
bio               368 drivers/md/dm-log-writes.c 	if (!bio) {
bio               372 drivers/md/dm-log-writes.c 	bio->bi_iter.bi_size = 0;
bio               373 drivers/md/dm-log-writes.c 	bio->bi_iter.bi_sector = sector;
bio               374 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
bio               375 drivers/md/dm-log-writes.c 	bio->bi_end_io = log_end_io;
bio               376 drivers/md/dm-log-writes.c 	bio->bi_private = lc;
bio               377 drivers/md/dm-log-writes.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               384 drivers/md/dm-log-writes.c 		ret = bio_add_page(bio, block->vecs[i].bv_page,
bio               388 drivers/md/dm-log-writes.c 			submit_bio(bio);
bio               389 drivers/md/dm-log-writes.c 			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
bio               390 drivers/md/dm-log-writes.c 			if (!bio) {
bio               394 drivers/md/dm-log-writes.c 			bio->bi_iter.bi_size = 0;
bio               395 drivers/md/dm-log-writes.c 			bio->bi_iter.bi_sector = sector;
bio               396 drivers/md/dm-log-writes.c 			bio_set_dev(bio, lc->logdev->bdev);
bio               397 drivers/md/dm-log-writes.c 			bio->bi_end_io = log_end_io;
bio               398 drivers/md/dm-log-writes.c 			bio->bi_private = lc;
bio               399 drivers/md/dm-log-writes.c 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               401 drivers/md/dm-log-writes.c 			ret = bio_add_page(bio, block->vecs[i].bv_page,
bio               405 drivers/md/dm-log-writes.c 				bio_put(bio);
bio               411 drivers/md/dm-log-writes.c 	submit_bio(bio);
bio               658 drivers/md/dm-log-writes.c static void normal_map_bio(struct dm_target *ti, struct bio *bio)
bio               662 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->dev->bdev);
bio               665 drivers/md/dm-log-writes.c static int log_writes_map(struct dm_target *ti, struct bio *bio)
bio               668 drivers/md/dm-log-writes.c 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
bio               674 drivers/md/dm-log-writes.c 	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
bio               675 drivers/md/dm-log-writes.c 	bool fua_bio = (bio->bi_opf & REQ_FUA);
bio               676 drivers/md/dm-log-writes.c 	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
bio               677 drivers/md/dm-log-writes.c 	bool meta_bio = (bio->bi_opf & REQ_META);
bio               688 drivers/md/dm-log-writes.c 	if (bio_data_dir(bio) == READ)
bio               692 drivers/md/dm-log-writes.c 	if (!bio_sectors(bio) && !flush_bio)
bio               702 drivers/md/dm-log-writes.c 		alloc_size = struct_size(block, vecs, bio_segments(bio));
bio               725 drivers/md/dm-log-writes.c 	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
bio               726 drivers/md/dm-log-writes.c 	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
bio               733 drivers/md/dm-log-writes.c 		bio_endio(bio);
bio               738 drivers/md/dm-log-writes.c 	if (flush_bio && !bio_sectors(bio)) {
bio               754 drivers/md/dm-log-writes.c 	bio_for_each_segment(bv, bio, iter) {
bio               786 drivers/md/dm-log-writes.c 	normal_map_bio(ti, bio);
bio               790 drivers/md/dm-log-writes.c static int normal_end_io(struct dm_target *ti, struct bio *bio,
bio               794 drivers/md/dm-log-writes.c 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
bio               796 drivers/md/dm-log-writes.c 	if (bio_data_dir(bio) == WRITE && pb->block) {
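
The dm-log-writes lines above build standalone write bios from scratch: allocate, aim at the log device, attach pages, submit. A condensed sketch of one such write, with the caller supplying the completion handler:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int write_one_page(struct block_device *bdev, struct page *page,
                          sector_t sector, unsigned len,
                          bio_end_io_t *done, void *private)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        if (!bio)
                return -ENOMEM;

        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio_set_dev(bio, bdev);
        bio->bi_end_io = done;
        bio->bi_private = private;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        /* bio_add_page() returns the length actually added. */
        if (bio_add_page(bio, page, len, 0) != len) {
                bio_put(bio);
                return -EIO;
        }
        submit_bio(bio);
        return 0;
}
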
bio               256 drivers/md/dm-mpath.c static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
bio               258 drivers/md/dm-mpath.c 	return dm_per_bio_data(bio, multipath_per_bio_data_size());
bio               268 drivers/md/dm-mpath.c static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
bio               270 drivers/md/dm-mpath.c 	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
bio               273 drivers/md/dm-mpath.c 	mpio->nr_bytes = bio->bi_iter.bi_size;
bio               277 drivers/md/dm-mpath.c 	dm_bio_record(bio_details, bio);
bio               535 drivers/md/dm-mpath.c 	clone->bio = clone->biotail = NULL;
bio               571 drivers/md/dm-mpath.c static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
bio               580 drivers/md/dm-mpath.c 		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
bio               589 drivers/md/dm-mpath.c 		bio_list_add(&m->queued_bios, bio);
bio               604 drivers/md/dm-mpath.c static int __multipath_map_bio(struct multipath *m, struct bio *bio,
bio               607 drivers/md/dm-mpath.c 	struct pgpath *pgpath = __map_bio(m, bio);
bio               621 drivers/md/dm-mpath.c 	bio->bi_status = 0;
bio               622 drivers/md/dm-mpath.c 	bio_set_dev(bio, pgpath->path.dev->bdev);
bio               623 drivers/md/dm-mpath.c 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio               632 drivers/md/dm-mpath.c static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
bio               637 drivers/md/dm-mpath.c 	multipath_init_per_bio_data(bio, &mpio);
bio               638 drivers/md/dm-mpath.c 	return __multipath_map_bio(m, bio, mpio);
bio               653 drivers/md/dm-mpath.c 	struct bio *bio;
bio               674 drivers/md/dm-mpath.c 	while ((bio = bio_list_pop(&bios))) {
bio               675 drivers/md/dm-mpath.c 		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
bio               676 drivers/md/dm-mpath.c 		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
bio               677 drivers/md/dm-mpath.c 		r = __multipath_map_bio(m, bio, mpio);
bio               680 drivers/md/dm-mpath.c 			bio->bi_status = BLK_STS_IOERR;
bio               681 drivers/md/dm-mpath.c 			bio_endio(bio);
bio               684 drivers/md/dm-mpath.c 			bio->bi_status = BLK_STS_DM_REQUEUE;
bio               685 drivers/md/dm-mpath.c 			bio_endio(bio);
bio               688 drivers/md/dm-mpath.c 			generic_make_request(bio);
bio              1569 drivers/md/dm-mpath.c static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
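
process_queued_bios() above shows the three outcomes for a requeued multipath bio: fail it, hand it back to dm core for requeue via bi_status, or resubmit it. A sketch of that dispatch keyed on the DM_MAPIO_* convention:

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

static void dispatch_one(struct bio *bio, int r)
{
        switch (r) {
        case DM_MAPIO_KILL:
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                break;
        case DM_MAPIO_REQUEUE:
                bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);         /* dm core sees the status, requeues */
                break;
        case DM_MAPIO_REMAPPED:
                generic_make_request(bio);
                break;
        default:                        /* DM_MAPIO_SUBMITTED: nothing to do */
                break;
        }
}
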
bio              3311 drivers/md/dm-raid.c static int raid_map(struct dm_target *ti, struct bio *bio)
bio              3324 drivers/md/dm-raid.c 	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
bio              3327 drivers/md/dm-raid.c 	md_handle_request(mddev, bio);
bio               121 drivers/md/dm-raid1.c static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
bio               130 drivers/md/dm-raid1.c 	bio_list_add(bl, bio);
bio               140 drivers/md/dm-raid1.c 	struct bio *bio;
bio               142 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(bio_list)))
bio               143 drivers/md/dm-raid1.c 		queue_bio(ms, bio, WRITE);
bio               163 drivers/md/dm-raid1.c static struct mirror *bio_get_m(struct bio *bio)
bio               165 drivers/md/dm-raid1.c 	return (struct mirror *) bio->bi_next;
bio               168 drivers/md/dm-raid1.c static void bio_set_m(struct bio *bio, struct mirror *m)
bio               170 drivers/md/dm-raid1.c 	bio->bi_next = (struct bio *) m;
bio               437 drivers/md/dm-raid1.c static int mirror_available(struct mirror_set *ms, struct bio *bio)
bio               440 drivers/md/dm-raid1.c 	region_t region = dm_rh_bio_to_region(ms->rh, bio);
bio               443 drivers/md/dm-raid1.c 		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
bio               451 drivers/md/dm-raid1.c static sector_t map_sector(struct mirror *m, struct bio *bio)
bio               453 drivers/md/dm-raid1.c 	if (unlikely(!bio->bi_iter.bi_size))
bio               455 drivers/md/dm-raid1.c 	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
bio               458 drivers/md/dm-raid1.c static void map_bio(struct mirror *m, struct bio *bio)
bio               460 drivers/md/dm-raid1.c 	bio_set_dev(bio, m->dev->bdev);
bio               461 drivers/md/dm-raid1.c 	bio->bi_iter.bi_sector = map_sector(m, bio);
bio               465 drivers/md/dm-raid1.c 		       struct bio *bio)
bio               468 drivers/md/dm-raid1.c 	io->sector = map_sector(m, bio);
bio               469 drivers/md/dm-raid1.c 	io->count = bio_sectors(bio);
bio               472 drivers/md/dm-raid1.c static void hold_bio(struct mirror_set *ms, struct bio *bio)
bio               487 drivers/md/dm-raid1.c 			bio->bi_status = BLK_STS_DM_REQUEUE;
bio               489 drivers/md/dm-raid1.c 			bio->bi_status = BLK_STS_IOERR;
bio               491 drivers/md/dm-raid1.c 		bio_endio(bio);
bio               498 drivers/md/dm-raid1.c 	bio_list_add(&ms->holds, bio);
bio               507 drivers/md/dm-raid1.c 	struct bio *bio = context;
bio               510 drivers/md/dm-raid1.c 	m = bio_get_m(bio);
bio               511 drivers/md/dm-raid1.c 	bio_set_m(bio, NULL);
bio               514 drivers/md/dm-raid1.c 		bio_endio(bio);
bio               520 drivers/md/dm-raid1.c 	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
bio               524 drivers/md/dm-raid1.c 		queue_bio(m->ms, bio, bio_data_dir(bio));
bio               530 drivers/md/dm-raid1.c 	bio_io_error(bio);
bio               534 drivers/md/dm-raid1.c static void read_async_bio(struct mirror *m, struct bio *bio)
bio               541 drivers/md/dm-raid1.c 		.mem.ptr.bio = bio,
bio               543 drivers/md/dm-raid1.c 		.notify.context = bio,
bio               547 drivers/md/dm-raid1.c 	map_region(&io, m, bio);
bio               548 drivers/md/dm-raid1.c 	bio_set_m(bio, m);
bio               562 drivers/md/dm-raid1.c 	struct bio *bio;
bio               565 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(reads))) {
bio               566 drivers/md/dm-raid1.c 		region = dm_rh_bio_to_region(ms->rh, bio);
bio               573 drivers/md/dm-raid1.c 			m = choose_mirror(ms, bio->bi_iter.bi_sector);
bio               578 drivers/md/dm-raid1.c 			read_async_bio(m, bio);
bio               580 drivers/md/dm-raid1.c 			bio_io_error(bio);
bio               599 drivers/md/dm-raid1.c 	struct bio *bio = (struct bio *) context;
bio               604 drivers/md/dm-raid1.c 	ms = bio_get_m(bio)->ms;
bio               605 drivers/md/dm-raid1.c 	bio_set_m(bio, NULL);
bio               614 drivers/md/dm-raid1.c 		bio_endio(bio);
bio               622 drivers/md/dm-raid1.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio               623 drivers/md/dm-raid1.c 		bio->bi_status = BLK_STS_NOTSUPP;
bio               624 drivers/md/dm-raid1.c 		bio_endio(bio);
bio               640 drivers/md/dm-raid1.c 	bio_list_add(&ms->failures, bio);
bio               646 drivers/md/dm-raid1.c static void do_write(struct mirror_set *ms, struct bio *bio)
bio               653 drivers/md/dm-raid1.c 		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
bio               655 drivers/md/dm-raid1.c 		.mem.ptr.bio = bio,
bio               657 drivers/md/dm-raid1.c 		.notify.context = bio,
bio               661 drivers/md/dm-raid1.c 	if (bio_op(bio) == REQ_OP_DISCARD) {
bio               668 drivers/md/dm-raid1.c 		map_region(dest++, m, bio);
bio               674 drivers/md/dm-raid1.c 	bio_set_m(bio, get_default_mirror(ms));
bio               682 drivers/md/dm-raid1.c 	struct bio *bio;
bio               699 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(writes))) {
bio               700 drivers/md/dm-raid1.c 		if ((bio->bi_opf & REQ_PREFLUSH) ||
bio               701 drivers/md/dm-raid1.c 		    (bio_op(bio) == REQ_OP_DISCARD)) {
bio               702 drivers/md/dm-raid1.c 			bio_list_add(&sync, bio);
bio               706 drivers/md/dm-raid1.c 		region = dm_rh_bio_to_region(ms->rh, bio);
bio               710 drivers/md/dm-raid1.c 			bio_list_add(&requeue, bio);
bio               730 drivers/md/dm-raid1.c 		bio_list_add(this_list, bio);
bio               768 drivers/md/dm-raid1.c 		while ((bio = bio_list_pop(&sync)))
bio               769 drivers/md/dm-raid1.c 			do_write(ms, bio);
bio               771 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(&recover)))
bio               772 drivers/md/dm-raid1.c 		dm_rh_delay(ms->rh, bio);
bio               774 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(&nosync))) {
bio               777 drivers/md/dm-raid1.c 			bio_list_add(&ms->failures, bio);
bio               781 drivers/md/dm-raid1.c 			map_bio(get_default_mirror(ms), bio);
bio               782 drivers/md/dm-raid1.c 			generic_make_request(bio);
bio               789 drivers/md/dm-raid1.c 	struct bio *bio;
bio               811 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(failures))) {
bio               814 drivers/md/dm-raid1.c 			dm_rh_mark_nosync(ms->rh, bio);
bio               831 drivers/md/dm-raid1.c 			bio_io_error(bio);
bio               833 drivers/md/dm-raid1.c 			hold_bio(ms, bio);
bio               835 drivers/md/dm-raid1.c 			bio_endio(bio);
bio              1184 drivers/md/dm-raid1.c static int mirror_map(struct dm_target *ti, struct bio *bio)
bio              1186 drivers/md/dm-raid1.c 	int r, rw = bio_data_dir(bio);
bio              1191 drivers/md/dm-raid1.c 	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
bio              1197 drivers/md/dm-raid1.c 		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
bio              1198 drivers/md/dm-raid1.c 		queue_bio(ms, bio, rw);
bio              1202 drivers/md/dm-raid1.c 	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
bio              1210 drivers/md/dm-raid1.c 		if (bio->bi_opf & REQ_RAHEAD)
bio              1213 drivers/md/dm-raid1.c 		queue_bio(ms, bio, rw);
bio              1221 drivers/md/dm-raid1.c 	m = choose_mirror(ms, bio->bi_iter.bi_sector);
bio              1225 drivers/md/dm-raid1.c 	dm_bio_record(&bio_record->details, bio);
bio              1228 drivers/md/dm-raid1.c 	map_bio(m, bio);
bio              1233 drivers/md/dm-raid1.c static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bio              1236 drivers/md/dm-raid1.c 	int rw = bio_data_dir(bio);
bio              1241 drivers/md/dm-raid1.c 	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
bio              1247 drivers/md/dm-raid1.c 		if (!(bio->bi_opf & REQ_PREFLUSH) &&
bio              1248 drivers/md/dm-raid1.c 		    bio_op(bio) != REQ_OP_DISCARD)
bio              1256 drivers/md/dm-raid1.c 	if (bio->bi_opf & REQ_RAHEAD)
bio              1281 drivers/md/dm-raid1.c 		if (default_ok(m) || mirror_available(ms, bio)) {
bio              1284 drivers/md/dm-raid1.c 			dm_bio_restore(bd, bio);
bio              1286 drivers/md/dm-raid1.c 			bio->bi_status = 0;
bio              1288 drivers/md/dm-raid1.c 			queue_bio(ms, bio, rw);
bio              1306 drivers/md/dm-raid1.c 	struct bio *bio;
bio              1321 drivers/md/dm-raid1.c 	while ((bio = bio_list_pop(&holds)))
bio              1322 drivers/md/dm-raid1.c 		hold_bio(ms, bio);
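
bio_get_m()/bio_set_m() above exploit the fact that bi_next is unused while the driver owns a bio, using it to carry a mirror pointer. A sketch of the trick; the link must be cleared before the bio re-enters the block layer:

#include <linux/bio.h>

struct mirror_like;             /* opaque stand-in for struct mirror */

static void stash_mirror(struct bio *bio, struct mirror_like *m)
{
        bio->bi_next = (struct bio *)m; /* driver-private reuse of bi_next */
}

static struct mirror_like *unstash_mirror(struct bio *bio)
{
        struct mirror_like *m = (struct mirror_like *)bio->bi_next;

        bio->bi_next = NULL;            /* make the bio sane again */
        return m;
}
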
bio               128 drivers/md/dm-region-hash.c region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
bio               130 drivers/md/dm-region-hash.c 	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
bio               395 drivers/md/dm-region-hash.c void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
bio               400 drivers/md/dm-region-hash.c 	region_t region = dm_rh_bio_to_region(rh, bio);
bio               403 drivers/md/dm-region-hash.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio               408 drivers/md/dm-region-hash.c 	if (bio_op(bio) == REQ_OP_DISCARD)
bio               528 drivers/md/dm-region-hash.c 	struct bio *bio;
bio               530 drivers/md/dm-region-hash.c 	for (bio = bios->head; bio; bio = bio->bi_next) {
bio               531 drivers/md/dm-region-hash.c 		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
bio               533 drivers/md/dm-region-hash.c 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
bio               690 drivers/md/dm-region-hash.c void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
bio               695 drivers/md/dm-region-hash.c 	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
bio               696 drivers/md/dm-region-hash.c 	bio_list_add(&reg->delayed_bios, bio);
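
dm_rh_bio_to_region() above subtracts the target's start sector and converts the result to a region number. A sketch of the arithmetic, assuming a power-of-two region size so the division reduces to a shift:

#include <linux/types.h>

static inline sector_t region_of(sector_t bio_sector, sector_t target_begin,
                                 unsigned region_shift)
{
        return (bio_sector - target_begin) >> region_shift;
}
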
bio                82 drivers/md/dm-rq.c static void end_clone_bio(struct bio *clone)
bio               328 drivers/md/dm-rq.c static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
bio               333 drivers/md/dm-rq.c 		container_of(bio, struct dm_rq_clone_bio_info, clone);
bio               337 drivers/md/dm-rq.c 	bio->bi_end_io = end_clone_bio;
bio                28 drivers/md/dm-rq.h 	struct bio *orig;
bio                30 drivers/md/dm-rq.h 	struct bio clone;
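
The dm-rq.h fields above embed the clone bio as the last member of the per-clone info struct, so the constructor can recover the wrapper from the bio pointer with container_of(). A sketch of that layout with stand-in types:

#include <linux/bio.h>
#include <linux/kernel.h>

struct clone_info_like {
        void *tio;                      /* stand-in for dm_rq_target_io */
        struct bio *orig;
        struct bio clone;               /* must stay last: trailing bvecs */
};

static struct clone_info_like *info_from_clone(struct bio *clone)
{
        return container_of(clone, struct clone_info_like, clone);
}
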
bio               228 drivers/md/dm-snap.c 	struct bio *full_bio;
bio               244 drivers/md/dm-snap.c static void init_tracked_chunk(struct bio *bio)
bio               246 drivers/md/dm-snap.c 	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
bio               250 drivers/md/dm-snap.c static bool is_bio_tracked(struct bio *bio)
bio               252 drivers/md/dm-snap.c 	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
bio               256 drivers/md/dm-snap.c static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
bio               258 drivers/md/dm-snap.c 	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
bio               268 drivers/md/dm-snap.c static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
bio               270 drivers/md/dm-snap.c 	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
bio               921 drivers/md/dm-snap.c static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
bio               981 drivers/md/dm-snap.c static void flush_bios(struct bio *bio);
bio               985 drivers/md/dm-snap.c 	struct bio *b = NULL;
bio              1122 drivers/md/dm-snap.c static void error_bios(struct bio *bio);
bio              1127 drivers/md/dm-snap.c 	struct bio *b = NULL;
bio              1564 drivers/md/dm-snap.c static void flush_bios(struct bio *bio)
bio              1566 drivers/md/dm-snap.c 	struct bio *n;
bio              1568 drivers/md/dm-snap.c 	while (bio) {
bio              1569 drivers/md/dm-snap.c 		n = bio->bi_next;
bio              1570 drivers/md/dm-snap.c 		bio->bi_next = NULL;
bio              1571 drivers/md/dm-snap.c 		generic_make_request(bio);
bio              1572 drivers/md/dm-snap.c 		bio = n;
bio              1576 drivers/md/dm-snap.c static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
bio              1581 drivers/md/dm-snap.c static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
bio              1583 drivers/md/dm-snap.c 	struct bio *n;
bio              1586 drivers/md/dm-snap.c 	while (bio) {
bio              1587 drivers/md/dm-snap.c 		n = bio->bi_next;
bio              1588 drivers/md/dm-snap.c 		bio->bi_next = NULL;
bio              1589 drivers/md/dm-snap.c 		r = do_origin(s->origin, bio, false);
bio              1591 drivers/md/dm-snap.c 			generic_make_request(bio);
bio              1592 drivers/md/dm-snap.c 		bio = n;
bio              1599 drivers/md/dm-snap.c static void error_bios(struct bio *bio)
bio              1601 drivers/md/dm-snap.c 	struct bio *n;
bio              1603 drivers/md/dm-snap.c 	while (bio) {
bio              1604 drivers/md/dm-snap.c 		n = bio->bi_next;
bio              1605 drivers/md/dm-snap.c 		bio->bi_next = NULL;
bio              1606 drivers/md/dm-snap.c 		bio_io_error(bio);
bio              1607 drivers/md/dm-snap.c 		bio = n;
bio              1641 drivers/md/dm-snap.c 	struct bio *origin_bios = NULL;
bio              1642 drivers/md/dm-snap.c 	struct bio *snapshot_bios = NULL;
bio              1643 drivers/md/dm-snap.c 	struct bio *full_bio = NULL;
bio              1809 drivers/md/dm-snap.c static void full_bio_end_io(struct bio *bio)
bio              1811 drivers/md/dm-snap.c 	void *callback_data = bio->bi_private;
bio              1813 drivers/md/dm-snap.c 	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
bio              1817 drivers/md/dm-snap.c 			   struct bio *bio)
bio              1822 drivers/md/dm-snap.c 	pe->full_bio = bio;
bio              1823 drivers/md/dm-snap.c 	pe->full_bio_end_io = bio->bi_end_io;
bio              1829 drivers/md/dm-snap.c 	bio->bi_end_io = full_bio_end_io;
bio              1830 drivers/md/dm-snap.c 	bio->bi_private = callback_data;
bio              1832 drivers/md/dm-snap.c 	generic_make_request(bio);
bio              1901 drivers/md/dm-snap.c 			    struct bio *bio, chunk_t chunk)
bio              1903 drivers/md/dm-snap.c 	bio_set_dev(bio, s->cow->bdev);
bio              1904 drivers/md/dm-snap.c 	bio->bi_iter.bi_sector =
bio              1907 drivers/md/dm-snap.c 		(bio->bi_iter.bi_sector & s->store->chunk_mask);
bio              1912 drivers/md/dm-snap.c 	struct bio *bio = context;
bio              1913 drivers/md/dm-snap.c 	struct dm_snapshot *s = bio->bi_private;
bio              1916 drivers/md/dm-snap.c 	bio->bi_status = write_err ? BLK_STS_IOERR : 0;
bio              1917 drivers/md/dm-snap.c 	bio_endio(bio);
bio              1921 drivers/md/dm-snap.c 			   struct bio *bio, chunk_t chunk)
bio              1926 drivers/md/dm-snap.c 	dest.sector = bio->bi_iter.bi_sector;
bio              1930 drivers/md/dm-snap.c 	WARN_ON_ONCE(bio->bi_private);
bio              1931 drivers/md/dm-snap.c 	bio->bi_private = s;
bio              1932 drivers/md/dm-snap.c 	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
bio              1935 drivers/md/dm-snap.c static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
bio              1937 drivers/md/dm-snap.c 	return bio->bi_iter.bi_size ==
bio              1941 drivers/md/dm-snap.c static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio              1950 drivers/md/dm-snap.c 	init_tracked_chunk(bio);
bio              1952 drivers/md/dm-snap.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio              1953 drivers/md/dm-snap.c 		bio_set_dev(bio, s->cow->bdev);
bio              1957 drivers/md/dm-snap.c 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
bio              1965 drivers/md/dm-snap.c 	if (bio_data_dir(bio) == WRITE) {
bio              1974 drivers/md/dm-snap.c 	    bio_data_dir(bio) == WRITE)) {
bio              1979 drivers/md/dm-snap.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
bio              1980 drivers/md/dm-snap.c 		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
bio              1987 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bio              1988 drivers/md/dm-snap.c 			track_chunk(s, bio, chunk);
bio              1997 drivers/md/dm-snap.c 		remap_exception(s, e, bio, chunk);
bio              1998 drivers/md/dm-snap.c 		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
bio              1999 drivers/md/dm-snap.c 		    io_overlaps_chunk(s, bio)) {
bio              2002 drivers/md/dm-snap.c 			zero_exception(s, e, bio, chunk);
bio              2009 drivers/md/dm-snap.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
bio              2014 drivers/md/dm-snap.c 		bio_endio(bio);
bio              2024 drivers/md/dm-snap.c 	if (bio_data_dir(bio) == WRITE) {
bio              2034 drivers/md/dm-snap.c 				remap_exception(s, e, bio, chunk);
bio              2059 drivers/md/dm-snap.c 		remap_exception(s, &pe->e, bio, chunk);
bio              2063 drivers/md/dm-snap.c 		if (!pe->started && io_overlaps_chunk(s, bio)) {
bio              2069 drivers/md/dm-snap.c 			start_full_bio(pe, bio);
bio              2073 drivers/md/dm-snap.c 		bio_list_add(&pe->snapshot_bios, bio);
bio              2086 drivers/md/dm-snap.c 		bio_set_dev(bio, s->origin->bdev);
bio              2087 drivers/md/dm-snap.c 		track_chunk(s, bio, chunk);
bio              2109 drivers/md/dm-snap.c static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
bio              2116 drivers/md/dm-snap.c 	init_tracked_chunk(bio);
bio              2118 drivers/md/dm-snap.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio              2119 drivers/md/dm-snap.c 		if (!dm_bio_get_target_bio_nr(bio))
bio              2120 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bio              2122 drivers/md/dm-snap.c 			bio_set_dev(bio, s->cow->bdev);
bio              2126 drivers/md/dm-snap.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
bio              2128 drivers/md/dm-snap.c 		bio_endio(bio);
bio              2132 drivers/md/dm-snap.c 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
bio              2144 drivers/md/dm-snap.c 		if (bio_data_dir(bio) == WRITE &&
bio              2148 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bio              2149 drivers/md/dm-snap.c 			bio_list_add(&s->bios_queued_during_merge, bio);
bio              2154 drivers/md/dm-snap.c 		remap_exception(s, e, bio, chunk);
bio              2156 drivers/md/dm-snap.c 		if (bio_data_dir(bio) == WRITE)
bio              2157 drivers/md/dm-snap.c 			track_chunk(s, bio, chunk);
bio              2162 drivers/md/dm-snap.c 	bio_set_dev(bio, s->origin->bdev);
bio              2164 drivers/md/dm-snap.c 	if (bio_data_dir(bio) == WRITE) {
bio              2166 drivers/md/dm-snap.c 		return do_origin(s->origin, bio, false);
bio              2175 drivers/md/dm-snap.c static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
bio              2180 drivers/md/dm-snap.c 	if (is_bio_tracked(bio))
bio              2181 drivers/md/dm-snap.c 		stop_tracking_chunk(s, bio);
bio              2420 drivers/md/dm-snap.c 			  struct bio *bio)
bio              2503 drivers/md/dm-snap.c 		if (bio) {
bio              2504 drivers/md/dm-snap.c 			bio_list_add(&pe->origin_bios, bio);
bio              2505 drivers/md/dm-snap.c 			bio = NULL;
bio              2541 drivers/md/dm-snap.c static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
bio              2557 drivers/md/dm-snap.c 		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
bio              2651 drivers/md/dm-snap.c static int origin_map(struct dm_target *ti, struct bio *bio)
bio              2656 drivers/md/dm-snap.c 	bio_set_dev(bio, o->dev->bdev);
bio              2658 drivers/md/dm-snap.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
bio              2661 drivers/md/dm-snap.c 	if (bio_data_dir(bio) != WRITE)
bio              2665 drivers/md/dm-snap.c 		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
bio              2667 drivers/md/dm-snap.c 	if (bio_sectors(bio) > available_sectors)
bio              2668 drivers/md/dm-snap.c 		dm_accept_partial_bio(bio, available_sectors);
bio              2671 drivers/md/dm-snap.c 	return do_origin(o->dev, bio, true);
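
remap_exception() above rewrites a snapshot bio's sector: the exception's chunk supplies the high bits while the offset within the chunk is kept from the original sector. A sketch of that arithmetic, with chunk_shift/chunk_mask assumed to mirror the exception-store fields:

#include <linux/types.h>

static inline sector_t remap_sector(sector_t old_sector, sector_t new_chunk,
                                    unsigned chunk_shift, sector_t chunk_mask)
{
        /* High bits from the new chunk, low bits from the old sector. */
        return (new_chunk << chunk_shift) + (old_sector & chunk_mask);
}
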
bio               264 drivers/md/dm-stripe.c static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
bio               269 drivers/md/dm-stripe.c 	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
bio               271 drivers/md/dm-stripe.c 	stripe_map_range_sector(sc, bio_end_sector(bio),
bio               274 drivers/md/dm-stripe.c 		bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bio               275 drivers/md/dm-stripe.c 		bio->bi_iter.bi_sector = begin +
bio               277 drivers/md/dm-stripe.c 		bio->bi_iter.bi_size = to_bytes(end - begin);
bio               281 drivers/md/dm-stripe.c 		bio_endio(bio);
bio               286 drivers/md/dm-stripe.c static int stripe_map(struct dm_target *ti, struct bio *bio)
bio               292 drivers/md/dm-stripe.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio               293 drivers/md/dm-stripe.c 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
bio               295 drivers/md/dm-stripe.c 		bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
bio               298 drivers/md/dm-stripe.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
bio               299 drivers/md/dm-stripe.c 	    unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
bio               300 drivers/md/dm-stripe.c 	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
bio               301 drivers/md/dm-stripe.c 	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
bio               302 drivers/md/dm-stripe.c 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
bio               304 drivers/md/dm-stripe.c 		return stripe_map_range(sc, bio, target_bio_nr);
bio               307 drivers/md/dm-stripe.c 	stripe_map_sector(sc, bio->bi_iter.bi_sector,
bio               308 drivers/md/dm-stripe.c 			  &stripe, &bio->bi_iter.bi_sector);
bio               310 drivers/md/dm-stripe.c 	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio               311 drivers/md/dm-stripe.c 	bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
bio               425 drivers/md/dm-stripe.c static int stripe_end_io(struct dm_target *ti, struct bio *bio,
bio               435 drivers/md/dm-stripe.c 	if (bio->bi_opf & REQ_RAHEAD)
bio               442 drivers/md/dm-stripe.c 	sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
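
stripe_map() above converts a target-relative sector into a stripe index and a per-device sector. Illustrative chunked-striping arithmetic (not dm-stripe's exact code, which also handles non-power-of-two chunk sizes): split the sector into a chunk number and an in-chunk offset, then spread chunks round-robin:

#include <linux/kernel.h>
#include <linux/types.h>

static void stripe_of(sector_t offset, unsigned chunk_shift,
                      unsigned nr_stripes, unsigned *stripe,
                      sector_t *dev_sector)
{
        sector_t chunk = offset >> chunk_shift;
        sector_t in_chunk = offset & (((sector_t)1 << chunk_shift) - 1);

        /* sector_div() leaves the quotient in chunk, returns remainder. */
        *stripe = sector_div(chunk, nr_stripes);
        *dev_sector = (chunk << chunk_shift) + in_chunk;
}
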
bio               319 drivers/md/dm-switch.c static int switch_map(struct dm_target *ti, struct bio *bio)
bio               322 drivers/md/dm-switch.c 	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio               325 drivers/md/dm-switch.c 	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio               326 drivers/md/dm-switch.c 	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
bio               127 drivers/md/dm-target.c static int io_err_map(struct dm_target *tt, struct bio *bio)
bio               223 drivers/md/dm-thin.c typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
bio               332 drivers/md/dm-thin.c 	struct bio flush_bio;
bio               383 drivers/md/dm-thin.c 	struct bio *parent_bio;
bio               384 drivers/md/dm-thin.c 	struct bio *bio;
bio               387 drivers/md/dm-thin.c static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
bio               394 drivers/md/dm-thin.c 	op->bio = NULL;
bio               404 drivers/md/dm-thin.c 				      GFP_NOWAIT, 0, &op->bio);
bio               409 drivers/md/dm-thin.c 	if (op->bio) {
bio               414 drivers/md/dm-thin.c 		bio_chain(op->bio, op->parent_bio);
bio               415 drivers/md/dm-thin.c 		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
bio               416 drivers/md/dm-thin.c 		submit_bio(op->bio);
bio               443 drivers/md/dm-thin.c static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
bio               455 drivers/md/dm-thin.c 	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
bio               602 drivers/md/dm-thin.c 	struct bio *bio;
bio               604 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(bios))) {
bio               605 drivers/md/dm-thin.c 		bio->bi_status = error;
bio               606 drivers/md/dm-thin.c 		bio_endio(bio);
bio               680 drivers/md/dm-thin.c static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
bio               683 drivers/md/dm-thin.c 	sector_t block_nr = bio->bi_iter.bi_sector;
bio               696 drivers/md/dm-thin.c static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
bio               700 drivers/md/dm-thin.c 	sector_t b = bio->bi_iter.bi_sector;
bio               701 drivers/md/dm-thin.c 	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
bio               721 drivers/md/dm-thin.c static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
bio               724 drivers/md/dm-thin.c 	sector_t bi_sector = bio->bi_iter.bi_sector;
bio               726 drivers/md/dm-thin.c 	bio_set_dev(bio, tc->pool_dev->bdev);
bio               728 drivers/md/dm-thin.c 		bio->bi_iter.bi_sector =
bio               732 drivers/md/dm-thin.c 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
bio               736 drivers/md/dm-thin.c static void remap_to_origin(struct thin_c *tc, struct bio *bio)
bio               738 drivers/md/dm-thin.c 	bio_set_dev(bio, tc->origin_dev->bdev);
bio               741 drivers/md/dm-thin.c static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
bio               743 drivers/md/dm-thin.c 	return op_is_flush(bio->bi_opf) &&
bio               747 drivers/md/dm-thin.c static void inc_all_io_entry(struct pool *pool, struct bio *bio)
bio               751 drivers/md/dm-thin.c 	if (bio_op(bio) == REQ_OP_DISCARD)
bio               754 drivers/md/dm-thin.c 	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio               758 drivers/md/dm-thin.c static void issue(struct thin_c *tc, struct bio *bio)
bio               763 drivers/md/dm-thin.c 	if (!bio_triggers_commit(tc, bio)) {
bio               764 drivers/md/dm-thin.c 		generic_make_request(bio);
bio               774 drivers/md/dm-thin.c 		bio_io_error(bio);
bio               783 drivers/md/dm-thin.c 	bio_list_add(&pool->deferred_flush_bios, bio);
bio               787 drivers/md/dm-thin.c static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
bio               789 drivers/md/dm-thin.c 	remap_to_origin(tc, bio);
bio               790 drivers/md/dm-thin.c 	issue(tc, bio);
bio               793 drivers/md/dm-thin.c static void remap_and_issue(struct thin_c *tc, struct bio *bio,
bio               796 drivers/md/dm-thin.c 	remap(tc, bio, block);
bio               797 drivers/md/dm-thin.c 	issue(tc, bio);
bio               830 drivers/md/dm-thin.c 	struct bio *bio;
bio               862 drivers/md/dm-thin.c static void overwrite_endio(struct bio *bio)
bio               864 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio               867 drivers/md/dm-thin.c 	bio->bi_end_io = m->saved_bi_end_io;
bio               869 drivers/md/dm-thin.c 	m->status = bio->bi_status;
bio               899 drivers/md/dm-thin.c static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
bio               911 drivers/md/dm-thin.c 	struct bio *bio;
bio               913 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&cell->bios))) {
bio               914 drivers/md/dm-thin.c 		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
bio               915 drivers/md/dm-thin.c 			bio_list_add(&info->defer_bios, bio);
bio               917 drivers/md/dm-thin.c 			inc_all_io_entry(info->tc->pool, bio);
bio               924 drivers/md/dm-thin.c 			bio_list_add(&info->issue_bios, bio);
bio               933 drivers/md/dm-thin.c 	struct bio *bio;
bio               948 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&info.defer_bios)))
bio               949 drivers/md/dm-thin.c 		thin_defer_bio(tc, bio);
bio               951 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&info.issue_bios)))
bio               952 drivers/md/dm-thin.c 		remap_and_issue(info.tc, bio, block);
bio               962 drivers/md/dm-thin.c static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
bio               971 drivers/md/dm-thin.c 	if (!bio_triggers_commit(tc, bio)) {
bio               972 drivers/md/dm-thin.c 		bio_endio(bio);
bio               982 drivers/md/dm-thin.c 		bio_io_error(bio);
bio               991 drivers/md/dm-thin.c 	bio_list_add(&pool->deferred_flush_completions, bio);
bio               999 drivers/md/dm-thin.c 	struct bio *bio = m->bio;
bio              1025 drivers/md/dm-thin.c 	if (bio) {
bio              1027 drivers/md/dm-thin.c 		complete_overwrite_bio(tc, bio);
bio              1051 drivers/md/dm-thin.c 	bio_io_error(m->bio);
bio              1057 drivers/md/dm-thin.c 	bio_endio(m->bio);
bio              1069 drivers/md/dm-thin.c 		bio_io_error(m->bio);
bio              1071 drivers/md/dm-thin.c 		bio_endio(m->bio);
bio              1080 drivers/md/dm-thin.c 						   struct bio *discard_parent)
bio              1139 drivers/md/dm-thin.c static void passdown_endio(struct bio *bio)
bio              1145 drivers/md/dm-thin.c 	queue_passdown_pt2(bio->bi_private);
bio              1146 drivers/md/dm-thin.c 	bio_put(bio);
bio              1154 drivers/md/dm-thin.c 	struct bio *discard_parent;
bio              1165 drivers/md/dm-thin.c 		bio_io_error(m->bio);
bio              1178 drivers/md/dm-thin.c 		bio_io_error(m->bio);
bio              1220 drivers/md/dm-thin.c 		bio_io_error(m->bio);
bio              1222 drivers/md/dm-thin.c 		bio_endio(m->bio);
bio              1247 drivers/md/dm-thin.c static int io_overlaps_block(struct pool *pool, struct bio *bio)
bio              1249 drivers/md/dm-thin.c 	return bio->bi_iter.bi_size ==
bio              1253 drivers/md/dm-thin.c static int io_overwrites_block(struct pool *pool, struct bio *bio)
bio              1255 drivers/md/dm-thin.c 	return (bio_data_dir(bio) == WRITE) &&
bio              1256 drivers/md/dm-thin.c 		io_overlaps_block(pool, bio);
bio              1259 drivers/md/dm-thin.c static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
bio              1262 drivers/md/dm-thin.c 	*save = bio->bi_end_io;
bio              1263 drivers/md/dm-thin.c 	bio->bi_end_io = fn;
bio              1284 drivers/md/dm-thin.c 	m->bio = NULL;
bio              1303 drivers/md/dm-thin.c static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
bio              1308 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              1311 drivers/md/dm-thin.c 	m->bio = bio;
bio              1312 drivers/md/dm-thin.c 	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
bio              1313 drivers/md/dm-thin.c 	inc_all_io_entry(pool, bio);
bio              1314 drivers/md/dm-thin.c 	remap_and_issue(tc, bio, data_begin);
bio              1323 drivers/md/dm-thin.c 			  struct dm_bio_prison_cell *cell, struct bio *bio,
bio              1351 drivers/md/dm-thin.c 	if (io_overwrites_block(pool, bio))
bio              1352 drivers/md/dm-thin.c 		remap_and_issue_overwrite(tc, bio, data_dest, m);
bio              1383 drivers/md/dm-thin.c 				   struct dm_bio_prison_cell *cell, struct bio *bio)
bio              1386 drivers/md/dm-thin.c 		      data_origin, data_dest, cell, bio,
bio              1392 drivers/md/dm-thin.c 			  struct bio *bio)
bio              1410 drivers/md/dm-thin.c 		if (io_overwrites_block(pool, bio))
bio              1411 drivers/md/dm-thin.c 			remap_and_issue_overwrite(tc, bio, data_block, m);
bio              1421 drivers/md/dm-thin.c 				   struct dm_bio_prison_cell *cell, struct bio *bio)
bio              1429 drivers/md/dm-thin.c 			      virt_block, data_dest, cell, bio,
bio              1434 drivers/md/dm-thin.c 			      virt_block, data_dest, cell, bio,
bio              1438 drivers/md/dm-thin.c 		schedule_zero(tc, virt_block, data_dest, cell, bio);
bio              1594 drivers/md/dm-thin.c static void retry_on_resume(struct bio *bio)
bio              1596 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              1601 drivers/md/dm-thin.c 	bio_list_add(&tc->retry_on_resume_list, bio);
bio              1629 drivers/md/dm-thin.c static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
bio              1634 drivers/md/dm-thin.c 		bio->bi_status = error;
bio              1635 drivers/md/dm-thin.c 		bio_endio(bio);
bio              1637 drivers/md/dm-thin.c 		retry_on_resume(bio);
bio              1642 drivers/md/dm-thin.c 	struct bio *bio;
bio              1655 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&bios)))
bio              1656 drivers/md/dm-thin.c 		retry_on_resume(bio);
bio              1673 drivers/md/dm-thin.c 	m->bio = virt_cell->holder;
bio              1680 drivers/md/dm-thin.c 				 struct bio *bio)
bio              1724 drivers/md/dm-thin.c 		m->bio = bio;
bio              1734 drivers/md/dm-thin.c 		bio_inc_remaining(bio);
bio              1744 drivers/md/dm-thin.c 	struct bio *bio = virt_cell->holder;
bio              1745 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              1753 drivers/md/dm-thin.c 	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
bio              1760 drivers/md/dm-thin.c 	bio_endio(bio);
bio              1763 drivers/md/dm-thin.c static void process_discard_bio(struct thin_c *tc, struct bio *bio)
bio              1769 drivers/md/dm-thin.c 	get_bio_block_range(tc, bio, &begin, &end);
bio              1774 drivers/md/dm-thin.c 		bio_endio(bio);
bio              1779 drivers/md/dm-thin.c 	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
bio              1792 drivers/md/dm-thin.c static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
bio              1805 drivers/md/dm-thin.c 				       data_block, cell, bio);
bio              1824 drivers/md/dm-thin.c 	struct bio *bio;
bio              1826 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&cell->bios))) {
bio              1827 drivers/md/dm-thin.c 		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
bio              1828 drivers/md/dm-thin.c 		    bio_op(bio) == REQ_OP_DISCARD)
bio              1829 drivers/md/dm-thin.c 			bio_list_add(&info->defer_bios, bio);
bio              1831 drivers/md/dm-thin.c 			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              1834 drivers/md/dm-thin.c 			inc_all_io_entry(info->tc->pool, bio);
bio              1835 drivers/md/dm-thin.c 			bio_list_add(&info->issue_bios, bio);
bio              1844 drivers/md/dm-thin.c 	struct bio *bio;
bio              1854 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&info.defer_bios)))
bio              1855 drivers/md/dm-thin.c 		thin_defer_bio(tc, bio);
bio              1857 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&info.issue_bios)))
bio              1858 drivers/md/dm-thin.c 		remap_and_issue(tc, bio, block);
bio              1861 drivers/md/dm-thin.c static void process_shared_bio(struct thin_c *tc, struct bio *bio,
bio              1875 drivers/md/dm-thin.c 	if (bio_detain(pool, &key, bio, &data_cell)) {
bio              1880 drivers/md/dm-thin.c 	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
bio              1881 drivers/md/dm-thin.c 		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
bio              1884 drivers/md/dm-thin.c 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              1887 drivers/md/dm-thin.c 		inc_all_io_entry(pool, bio);
bio              1888 drivers/md/dm-thin.c 		remap_and_issue(tc, bio, lookup_result->block);
bio              1895 drivers/md/dm-thin.c static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
bio              1905 drivers/md/dm-thin.c 	if (!bio->bi_iter.bi_size) {
bio              1906 drivers/md/dm-thin.c 		inc_all_io_entry(pool, bio);
bio              1909 drivers/md/dm-thin.c 		remap_and_issue(tc, bio, 0);
bio              1916 drivers/md/dm-thin.c 	if (bio_data_dir(bio) == READ) {
bio              1917 drivers/md/dm-thin.c 		zero_fill_bio(bio);
bio              1919 drivers/md/dm-thin.c 		bio_endio(bio);
bio              1927 drivers/md/dm-thin.c 			schedule_external_copy(tc, block, data_block, cell, bio);
bio              1929 drivers/md/dm-thin.c 			schedule_zero(tc, block, data_block, cell, bio);
bio              1948 drivers/md/dm-thin.c 	struct bio *bio = cell->holder;
bio              1949 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
bio              1961 drivers/md/dm-thin.c 			process_shared_bio(tc, bio, block, &lookup_result, cell);
bio              1963 drivers/md/dm-thin.c 			inc_all_io_entry(pool, bio);
bio              1964 drivers/md/dm-thin.c 			remap_and_issue(tc, bio, lookup_result.block);
bio              1970 drivers/md/dm-thin.c 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
bio              1971 drivers/md/dm-thin.c 			inc_all_io_entry(pool, bio);
bio              1974 drivers/md/dm-thin.c 			if (bio_end_sector(bio) <= tc->origin_size)
bio              1975 drivers/md/dm-thin.c 				remap_to_origin_and_issue(tc, bio);
bio              1977 drivers/md/dm-thin.c 			else if (bio->bi_iter.bi_sector < tc->origin_size) {
bio              1978 drivers/md/dm-thin.c 				zero_fill_bio(bio);
bio              1979 drivers/md/dm-thin.c 				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
bio              1980 drivers/md/dm-thin.c 				remap_to_origin_and_issue(tc, bio);
bio              1983 drivers/md/dm-thin.c 				zero_fill_bio(bio);
bio              1984 drivers/md/dm-thin.c 				bio_endio(bio);
bio              1987 drivers/md/dm-thin.c 			provision_block(tc, bio, block, cell);
bio              1994 drivers/md/dm-thin.c 		bio_io_error(bio);
bio              1999 drivers/md/dm-thin.c static void process_bio(struct thin_c *tc, struct bio *bio)
bio              2002 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
bio              2011 drivers/md/dm-thin.c 	if (bio_detain(pool, &key, bio, &cell))
bio              2017 drivers/md/dm-thin.c static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
bio              2021 drivers/md/dm-thin.c 	int rw = bio_data_dir(bio);
bio              2022 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
bio              2028 drivers/md/dm-thin.c 		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
bio              2029 drivers/md/dm-thin.c 			handle_unserviceable_bio(tc->pool, bio);
bio              2033 drivers/md/dm-thin.c 			inc_all_io_entry(tc->pool, bio);
bio              2034 drivers/md/dm-thin.c 			remap_and_issue(tc, bio, lookup_result.block);
bio              2044 drivers/md/dm-thin.c 			handle_unserviceable_bio(tc->pool, bio);
bio              2049 drivers/md/dm-thin.c 			inc_all_io_entry(tc->pool, bio);
bio              2050 drivers/md/dm-thin.c 			remap_to_origin_and_issue(tc, bio);
bio              2054 drivers/md/dm-thin.c 		zero_fill_bio(bio);
bio              2055 drivers/md/dm-thin.c 		bio_endio(bio);
bio              2063 drivers/md/dm-thin.c 		bio_io_error(bio);
bio              2068 drivers/md/dm-thin.c static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
bio              2070 drivers/md/dm-thin.c 	__process_bio_read_only(tc, bio, NULL);
bio              2078 drivers/md/dm-thin.c static void process_bio_success(struct thin_c *tc, struct bio *bio)
bio              2080 drivers/md/dm-thin.c 	bio_endio(bio);
bio              2083 drivers/md/dm-thin.c static void process_bio_fail(struct thin_c *tc, struct bio *bio)
bio              2085 drivers/md/dm-thin.c 	bio_io_error(bio);
bio              2111 drivers/md/dm-thin.c static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
bio              2115 drivers/md/dm-thin.c 	sector_t bi_sector = bio->bi_iter.bi_sector;
bio              2129 drivers/md/dm-thin.c 	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              2138 drivers/md/dm-thin.c 	struct bio *bio;
bio              2142 drivers/md/dm-thin.c 		bio = thin_bio(pbd);
bio              2144 drivers/md/dm-thin.c 		bio_list_add(&tc->deferred_bio_list, bio);
bio              2153 drivers/md/dm-thin.c 	struct bio *bio;
bio              2161 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&bios)))
bio              2162 drivers/md/dm-thin.c 		__thin_bio_rb_add(tc, bio);
bio              2176 drivers/md/dm-thin.c 	struct bio *bio;
bio              2204 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&bios))) {
bio              2212 drivers/md/dm-thin.c 			bio_list_add(&tc->deferred_bio_list, bio);
bio              2218 drivers/md/dm-thin.c 		if (bio_op(bio) == REQ_OP_DISCARD)
bio              2219 drivers/md/dm-thin.c 			pool->process_discard(tc, bio);
bio              2221 drivers/md/dm-thin.c 			pool->process_bio(tc, bio);
bio              2355 drivers/md/dm-thin.c 	struct bio *bio;
bio              2388 drivers/md/dm-thin.c 		while ((bio = bio_list_pop(&bios)))
bio              2389 drivers/md/dm-thin.c 			bio_io_error(bio);
bio              2394 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&bio_completions)))
bio              2395 drivers/md/dm-thin.c 		bio_endio(bio);
bio              2397 drivers/md/dm-thin.c 	while ((bio = bio_list_pop(&bios))) {
bio              2402 drivers/md/dm-thin.c 		if (bio->bi_opf & REQ_PREFLUSH)
bio              2403 drivers/md/dm-thin.c 			bio_endio(bio);
bio              2405 drivers/md/dm-thin.c 			generic_make_request(bio);
bio              2668 drivers/md/dm-thin.c static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
bio              2674 drivers/md/dm-thin.c 	bio_list_add(&tc->deferred_bio_list, bio);
bio              2680 drivers/md/dm-thin.c static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
bio              2685 drivers/md/dm-thin.c 	thin_defer_bio(tc, bio);
bio              2703 drivers/md/dm-thin.c static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
bio              2705 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
bio              2717 drivers/md/dm-thin.c static int thin_bio_map(struct dm_target *ti, struct bio *bio)
bio              2721 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
bio              2727 drivers/md/dm-thin.c 	thin_hook_bio(tc, bio);
bio              2730 drivers/md/dm-thin.c 		bio->bi_status = BLK_STS_DM_REQUEUE;
bio              2731 drivers/md/dm-thin.c 		bio_endio(bio);
bio              2736 drivers/md/dm-thin.c 		bio_io_error(bio);
bio              2740 drivers/md/dm-thin.c 	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
bio              2741 drivers/md/dm-thin.c 		thin_defer_bio_with_throttle(tc, bio);
bio              2750 drivers/md/dm-thin.c 	if (bio_detain(tc->pool, &key, bio, &virt_cell))
bio              2780 drivers/md/dm-thin.c 		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
bio              2785 drivers/md/dm-thin.c 		inc_all_io_entry(tc->pool, bio);
bio              2789 drivers/md/dm-thin.c 		remap(tc, bio, result.block);
bio              2803 drivers/md/dm-thin.c 		bio_io_error(bio);
bio              3227 drivers/md/dm-thin.c 	struct bio *flush_bio = &pt->flush_bio;
bio              3452 drivers/md/dm-thin.c static int pool_map(struct dm_target *ti, struct bio *bio)
bio              3463 drivers/md/dm-thin.c 	bio_set_dev(bio, pt->data_dev->bdev);
bio              4335 drivers/md/dm-thin.c static int thin_map(struct dm_target *ti, struct bio *bio)
bio              4337 drivers/md/dm-thin.c 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio              4339 drivers/md/dm-thin.c 	return thin_bio_map(ti, bio);
bio              4342 drivers/md/dm-thin.c static int thin_endio(struct dm_target *ti, struct bio *bio,
bio              4346 drivers/md/dm-thin.c 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
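
The save_and_set_endio() lines above (dm-thin.c:1259-1263) show dm-thin's endio interception: the original bi_end_io is stashed and a temporary callback substituted, so the pool observes an overwrite completing before the issuer does. A minimal sketch of that hook-and-restore pattern; the names (hook_ctx, my_hooked_endio) are invented, and the context is kept in bi_private here rather than in dm-thin's per-bio data:

#include <linux/bio.h>

struct hook_ctx {
	bio_end_io_t	*saved_end_io;	/* original completion callback */
	void		*saved_private;	/* original bi_private */
};

static void my_hooked_endio(struct bio *bio)
{
	struct hook_ctx *ctx = bio->bi_private;

	/* ... per-target completion work would go here ... */

	bio->bi_end_io = ctx->saved_end_io;	/* restore the caller's endio */
	bio->bi_private = ctx->saved_private;
	bio_endio(bio);				/* complete for real */
}

static void hook_bio(struct bio *bio, struct hook_ctx *ctx)
{
	ctx->saved_end_io = bio->bi_end_io;
	ctx->saved_private = bio->bi_private;
	bio->bi_end_io = my_hooked_endio;
	bio->bi_private = ctx;
}
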
bio               116 drivers/md/dm-unstripe.c static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
bio               119 drivers/md/dm-unstripe.c 	sector_t sector = bio->bi_iter.bi_sector;
bio               134 drivers/md/dm-unstripe.c static int unstripe_map(struct dm_target *ti, struct bio *bio)
bio               138 drivers/md/dm-unstripe.c 	bio_set_dev(bio, uc->dev->bdev);
bio               139 drivers/md/dm-unstripe.c 	bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
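
unstripe_map() above is close to the smallest useful bio-based ->map: repoint the bio at the backing device, rewrite the sector, and return DM_MAPIO_REMAPPED so dm core resubmits it. A sketch under those assumptions; my_ctx and its fields are illustrative, not dm-unstripe's real context:

#include <linux/device-mapper.h>

struct my_ctx {
	struct dm_dev	*dev;	/* backing device */
	sector_t	start;	/* offset on the backing device */
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_ctx *c = ti->private;

	bio_set_dev(bio, c->dev->bdev);
	bio->bi_iter.bi_sector = c->start +
		dm_target_offset(ti, bio->bi_iter.bi_sector);

	return DM_MAPIO_REMAPPED;	/* dm core submits the remapped bio */
}
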
bio               371 drivers/md/dm-verity-target.c 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio               378 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
bio               400 drivers/md/dm-verity-target.c 		bio_advance_iter(bio, iter, len);
bio               418 drivers/md/dm-verity-target.c 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio               424 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
bio               438 drivers/md/dm-verity-target.c 		bio_advance_iter(bio, iter, len);
bio               459 drivers/md/dm-verity-target.c 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio               461 drivers/md/dm-verity-target.c 	bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
bio               542 drivers/md/dm-verity-target.c 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio               544 drivers/md/dm-verity-target.c 	bio->bi_end_io = io->orig_bi_end_io;
bio               545 drivers/md/dm-verity-target.c 	bio->bi_status = status;
bio               549 drivers/md/dm-verity-target.c 	bio_endio(bio);
bio               559 drivers/md/dm-verity-target.c static void verity_end_io(struct bio *bio)
bio               561 drivers/md/dm-verity-target.c 	struct dm_verity_io *io = bio->bi_private;
bio               563 drivers/md/dm-verity-target.c 	if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
bio               564 drivers/md/dm-verity-target.c 		verity_finish_io(io, bio->bi_status);
bio               633 drivers/md/dm-verity-target.c static int verity_map(struct dm_target *ti, struct bio *bio)
bio               638 drivers/md/dm-verity-target.c 	bio_set_dev(bio, v->data_dev->bdev);
bio               639 drivers/md/dm-verity-target.c 	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
bio               641 drivers/md/dm-verity-target.c 	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
bio               647 drivers/md/dm-verity-target.c 	if (bio_end_sector(bio) >>
bio               653 drivers/md/dm-verity-target.c 	if (bio_data_dir(bio) == WRITE)
bio               656 drivers/md/dm-verity-target.c 	io = dm_per_bio_data(bio, ti->per_io_data_size);
bio               658 drivers/md/dm-verity-target.c 	io->orig_bi_end_io = bio->bi_end_io;
bio               659 drivers/md/dm-verity-target.c 	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
bio               660 drivers/md/dm-verity-target.c 	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio               662 drivers/md/dm-verity-target.c 	bio->bi_end_io = verity_end_io;
bio               663 drivers/md/dm-verity-target.c 	bio->bi_private = io;
bio               664 drivers/md/dm-verity-target.c 	io->iter = bio->bi_iter;
bio               670 drivers/md/dm-verity-target.c 	generic_make_request(bio);
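
dm-verity walks a bio's payload with a private copy of the iterator (io->iter, captured from bi_iter at dm-verity-target.c:664), so the bio's own bi_iter stays intact for the lower layers; bio_iter_iovec() and bio_advance_iter() do the stepping. A sketch of that read-only walk; process_page() is a hypothetical stand-in for the per-segment hash update:

#include <linux/bio.h>
#include <linux/highmem.h>

static void walk_bio_data(struct bio *bio, struct bvec_iter iter,
			  unsigned int nbytes)
{
	while (nbytes) {
		/* iter is a local copy: the bio's bi_iter is untouched */
		struct bio_vec bv = bio_iter_iovec(bio, iter);
		unsigned int len = min(nbytes, bv.bv_len);
		void *p = kmap_atomic(bv.bv_page);

		/* process_page(p + bv.bv_offset, len);  -- hypothetical */
		kunmap_atomic(p);

		bio_advance_iter(bio, &iter, len);
		nbytes -= len;
	}
}
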
bio               194 drivers/md/dm-writecache.c 	struct bio bio;
bio              1066 drivers/md/dm-writecache.c static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
bio              1071 drivers/md/dm-writecache.c 	int rw = bio_data_dir(bio);
bio              1075 drivers/md/dm-writecache.c 		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
bio              1084 drivers/md/dm-writecache.c 			flush_dcache_page(bio_page(bio));
bio              1087 drivers/md/dm-writecache.c 				bio->bi_status = BLK_STS_IOERR;
bio              1090 drivers/md/dm-writecache.c 			flush_dcache_page(bio_page(bio));
bio              1098 drivers/md/dm-writecache.c 		bio_advance(bio, size);
bio              1107 drivers/md/dm-writecache.c 		struct bio *bio;
bio              1110 drivers/md/dm-writecache.c 		bio = bio_list_pop(&wc->flush_list);
bio              1111 drivers/md/dm-writecache.c 		if (!bio) {
bio              1124 drivers/md/dm-writecache.c 		if (bio_op(bio) == REQ_OP_DISCARD) {
bio              1125 drivers/md/dm-writecache.c 			writecache_discard(wc, bio->bi_iter.bi_sector,
bio              1126 drivers/md/dm-writecache.c 					   bio_end_sector(bio));
bio              1128 drivers/md/dm-writecache.c 			bio_set_dev(bio, wc->dev->bdev);
bio              1129 drivers/md/dm-writecache.c 			generic_make_request(bio);
bio              1134 drivers/md/dm-writecache.c 				bio->bi_status = BLK_STS_IOERR;
bio              1135 drivers/md/dm-writecache.c 			bio_endio(bio);
bio              1142 drivers/md/dm-writecache.c static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
bio              1146 drivers/md/dm-writecache.c 	bio_list_add(&wc->flush_list, bio);
bio              1149 drivers/md/dm-writecache.c static int writecache_map(struct dm_target *ti, struct bio *bio)
bio              1154 drivers/md/dm-writecache.c 	bio->bi_private = NULL;
bio              1158 drivers/md/dm-writecache.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
bio              1167 drivers/md/dm-writecache.c 			writecache_offload_bio(wc, bio);
bio              1172 drivers/md/dm-writecache.c 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
bio              1174 drivers/md/dm-writecache.c 	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
bio              1177 drivers/md/dm-writecache.c 		      (unsigned long long)bio->bi_iter.bi_sector,
bio              1178 drivers/md/dm-writecache.c 		      bio->bi_iter.bi_size, wc->block_size);
bio              1182 drivers/md/dm-writecache.c 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
bio              1186 drivers/md/dm-writecache.c 			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
bio              1189 drivers/md/dm-writecache.c 			writecache_offload_bio(wc, bio);
bio              1194 drivers/md/dm-writecache.c 	if (bio_data_dir(bio) == READ) {
bio              1196 drivers/md/dm-writecache.c 		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
bio              1197 drivers/md/dm-writecache.c 		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
bio              1199 drivers/md/dm-writecache.c 				bio_copy_block(wc, bio, memory_data(wc, e));
bio              1200 drivers/md/dm-writecache.c 				if (bio->bi_iter.bi_size)
bio              1204 drivers/md/dm-writecache.c 				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
bio              1205 drivers/md/dm-writecache.c 				bio_set_dev(bio, wc->ssd_dev->bdev);
bio              1206 drivers/md/dm-writecache.c 				bio->bi_iter.bi_sector = cache_sector(wc, e);
bio              1214 drivers/md/dm-writecache.c 					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
bio              1215 drivers/md/dm-writecache.c 				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
bio              1216 drivers/md/dm-writecache.c 					dm_accept_partial_bio(bio, next_boundary);
bio              1225 drivers/md/dm-writecache.c 			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
bio              1239 drivers/md/dm-writecache.c 			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
bio              1244 drivers/md/dm-writecache.c 				bio_copy_block(wc, bio, memory_data(wc, e));
bio              1246 drivers/md/dm-writecache.c 				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
bio              1247 drivers/md/dm-writecache.c 				bio_set_dev(bio, wc->ssd_dev->bdev);
bio              1248 drivers/md/dm-writecache.c 				bio->bi_iter.bi_sector = cache_sector(wc, e);
bio              1257 drivers/md/dm-writecache.c 		} while (bio->bi_iter.bi_size);
bio              1259 drivers/md/dm-writecache.c 		if (unlikely(bio->bi_opf & REQ_FUA ||
bio              1268 drivers/md/dm-writecache.c 	bio_set_dev(bio, wc->dev->bdev);
bio              1274 drivers/md/dm-writecache.c 	bio->bi_private = (void *)1;
bio              1275 drivers/md/dm-writecache.c 	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
bio              1281 drivers/md/dm-writecache.c 	bio_endio(bio);
bio              1290 drivers/md/dm-writecache.c 	bio_io_error(bio);
bio              1294 drivers/md/dm-writecache.c static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
bio              1298 drivers/md/dm-writecache.c 	if (bio->bi_private != NULL) {
bio              1299 drivers/md/dm-writecache.c 		int dir = bio_data_dir(bio);
bio              1330 drivers/md/dm-writecache.c static void writecache_writeback_endio(struct bio *bio)
bio              1332 drivers/md/dm-writecache.c 	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
bio              1368 drivers/md/dm-writecache.c 		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
bio              1369 drivers/md/dm-writecache.c 			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
bio              1370 drivers/md/dm-writecache.c 					"write error %d", wb->bio.bi_status);
bio              1392 drivers/md/dm-writecache.c 		bio_put(&wb->bio);
bio              1479 drivers/md/dm-writecache.c 	return bio_add_page(&wb->bio, persistent_memory_page(address),
bio              1504 drivers/md/dm-writecache.c 	struct bio *bio;
bio              1515 drivers/md/dm-writecache.c 		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
bio              1516 drivers/md/dm-writecache.c 		wb = container_of(bio, struct writeback_struct, bio);
bio              1518 drivers/md/dm-writecache.c 		bio->bi_end_io = writecache_writeback_endio;
bio              1519 drivers/md/dm-writecache.c 		bio_set_dev(bio, wc->dev->bdev);
bio              1520 drivers/md/dm-writecache.c 		bio->bi_iter.bi_sector = read_original_sector(wc, e);
bio              1546 drivers/md/dm-writecache.c 		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
bio              1548 drivers/md/dm-writecache.c 			bio->bi_status = BLK_STS_IOERR;
bio              1549 drivers/md/dm-writecache.c 			bio_endio(bio);
bio              1551 drivers/md/dm-writecache.c 			submit_bio(bio);
bio              1962 drivers/md/dm-writecache.c 				offsetof(struct writeback_struct, bio),
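
The bioset_init() front_pad of offsetof(struct writeback_struct, bio) above means every bio allocated from wc->bio_set is embedded at the tail of a writeback_struct, recoverable in the endio with container_of() (dm-writecache.c:1332). A sketch of that embedded-bio layout with invented names:

#include <linux/bio.h>
#include <linux/list.h>

struct my_wb {
	struct list_head list;	/* private bookkeeping */
	struct bio	 bio;	/* must be the last member */
};

static struct bio_set my_bio_set;

static int my_pool_init(void)
{
	/* front_pad reserves room for my_wb ahead of each struct bio */
	return bioset_init(&my_bio_set, 64,
			   offsetof(struct my_wb, bio), BIOSET_NEED_BVECS);
}

static void my_endio(struct bio *bio)
{
	struct my_wb *wb = container_of(bio, struct my_wb, bio);

	/* ... finish using wb->list, then drop the reference ... */
	bio_put(&wb->bio);
}
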
bio                36 drivers/md/dm-zero.c static int zero_map(struct dm_target *ti, struct bio *bio)
bio                38 drivers/md/dm-zero.c 	switch (bio_op(bio)) {
bio                40 drivers/md/dm-zero.c 		if (bio->bi_opf & REQ_RAHEAD) {
bio                44 drivers/md/dm-zero.c 		zero_fill_bio(bio);
bio                53 drivers/md/dm-zero.c 	bio_endio(bio);
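
dm-zero above is the smallest complete bio-based target: reads are zero-filled and completed inside ->map, writes are silently dropped, and DM_MAPIO_SUBMITTED tells dm core not to resubmit. A sketch reconstructing that shape (the REQ_RAHEAD branch is assumed to fail the bio, matching the switch visible above):

#include <linux/device-mapper.h>

static int my_zero_map(struct dm_target *ti, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;	/* readahead: cheap to fail */
		zero_fill_bio(bio);		/* reads see all zeroes */
		break;
	case REQ_OP_WRITE:
		break;				/* writes vanish */
	default:
		return DM_MAPIO_KILL;
	}

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;	/* dm core must not resubmit */
}
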
bio               377 drivers/md/dm-zoned-metadata.c static void dmz_mblock_bio_end_io(struct bio *bio)
bio               379 drivers/md/dm-zoned-metadata.c 	struct dmz_mblock *mblk = bio->bi_private;
bio               382 drivers/md/dm-zoned-metadata.c 	if (bio->bi_status)
bio               385 drivers/md/dm-zoned-metadata.c 	if (bio_op(bio) == REQ_OP_WRITE)
bio               394 drivers/md/dm-zoned-metadata.c 	bio_put(bio);
bio               405 drivers/md/dm-zoned-metadata.c 	struct bio *bio;
bio               415 drivers/md/dm-zoned-metadata.c 	bio = bio_alloc(GFP_NOIO, 1);
bio               416 drivers/md/dm-zoned-metadata.c 	if (!bio) {
bio               431 drivers/md/dm-zoned-metadata.c 		bio_put(bio);
bio               442 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio               443 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bio               444 drivers/md/dm-zoned-metadata.c 	bio->bi_private = mblk;
bio               445 drivers/md/dm-zoned-metadata.c 	bio->bi_end_io = dmz_mblock_bio_end_io;
bio               446 drivers/md/dm-zoned-metadata.c 	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
bio               447 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
bio               448 drivers/md/dm-zoned-metadata.c 	submit_bio(bio);
bio               583 drivers/md/dm-zoned-metadata.c 	struct bio *bio;
bio               588 drivers/md/dm-zoned-metadata.c 	bio = bio_alloc(GFP_NOIO, 1);
bio               589 drivers/md/dm-zoned-metadata.c 	if (!bio) {
bio               596 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio               597 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bio               598 drivers/md/dm-zoned-metadata.c 	bio->bi_private = mblk;
bio               599 drivers/md/dm-zoned-metadata.c 	bio->bi_end_io = dmz_mblock_bio_end_io;
bio               600 drivers/md/dm-zoned-metadata.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio               601 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
bio               602 drivers/md/dm-zoned-metadata.c 	submit_bio(bio);
bio               613 drivers/md/dm-zoned-metadata.c 	struct bio *bio;
bio               619 drivers/md/dm-zoned-metadata.c 	bio = bio_alloc(GFP_NOIO, 1);
bio               620 drivers/md/dm-zoned-metadata.c 	if (!bio)
bio               623 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio               624 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bio               625 drivers/md/dm-zoned-metadata.c 	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio               626 drivers/md/dm-zoned-metadata.c 	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
bio               627 drivers/md/dm-zoned-metadata.c 	ret = submit_bio_wait(bio);
bio               628 drivers/md/dm-zoned-metadata.c 	bio_put(bio);
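
dmz_rdwr_block() above (dm-zoned-metadata.c:613-628) is the stock synchronous one-block I/O idiom: a single-vec bio, one page, submit_bio_wait(), bio_put(). A self-contained sketch; MY_BLOCK_SIZE mirrors DMZ_BLOCK_SIZE but is an assumption here:

#include <linux/bio.h>
#include <linux/blkdev.h>

#define MY_BLOCK_SIZE		4096
#define MY_BLOCK_SECTORS	(MY_BLOCK_SIZE >> SECTOR_SHIFT)

static int rdwr_block_sync(struct block_device *bdev, unsigned int op,
			   sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_NOIO, 1);	/* a single bio_vec is enough */
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = block * MY_BLOCK_SECTORS;
	bio_set_dev(bio, bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, MY_BLOCK_SIZE, 0);

	ret = submit_bio_wait(bio);	/* sleeps until completion */
	bio_put(bio);
	return ret;
}
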
bio                22 drivers/md/dm-zoned-target.c 	struct bio		*bio;
bio                77 drivers/md/dm-zoned-target.c static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
bio                79 drivers/md/dm-zoned-target.c 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
bio                81 drivers/md/dm-zoned-target.c 	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio                82 drivers/md/dm-zoned-target.c 		bio->bi_status = status;
bio                83 drivers/md/dm-zoned-target.c 	if (bio->bi_status != BLK_STS_OK)
bio                90 drivers/md/dm-zoned-target.c 			if (bio->bi_status != BLK_STS_OK &&
bio                91 drivers/md/dm-zoned-target.c 			    bio_op(bio) == REQ_OP_WRITE &&
bio                96 drivers/md/dm-zoned-target.c 		bio_endio(bio);
bio               104 drivers/md/dm-zoned-target.c static void dmz_clone_endio(struct bio *clone)
bio               110 drivers/md/dm-zoned-target.c 	dmz_bio_endio(bioctx->bio, status);
bio               118 drivers/md/dm-zoned-target.c 			  struct bio *bio, sector_t chunk_block,
bio               121 drivers/md/dm-zoned-target.c 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
bio               122 drivers/md/dm-zoned-target.c 	struct bio *clone;
bio               124 drivers/md/dm-zoned-target.c 	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
bio               135 drivers/md/dm-zoned-target.c 	bio_advance(bio, clone->bi_iter.bi_size);
bio               140 drivers/md/dm-zoned-target.c 	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
bio               149 drivers/md/dm-zoned-target.c static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
bio               155 drivers/md/dm-zoned-target.c 	swap(bio->bi_iter.bi_size, size);
bio               156 drivers/md/dm-zoned-target.c 	zero_fill_bio(bio);
bio               157 drivers/md/dm-zoned-target.c 	swap(bio->bi_iter.bi_size, size);
bio               159 drivers/md/dm-zoned-target.c 	bio_advance(bio, size);
bio               166 drivers/md/dm-zoned-target.c 			   struct bio *bio)
bio               168 drivers/md/dm-zoned-target.c 	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
bio               169 drivers/md/dm-zoned-target.c 	unsigned int nr_blocks = dmz_bio_blocks(bio);
bio               176 drivers/md/dm-zoned-target.c 		zero_fill_bio(bio);
bio               181 drivers/md/dm-zoned-target.c 		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
bio               220 drivers/md/dm-zoned-target.c 			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
bio               226 drivers/md/dm-zoned-target.c 			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
bio               240 drivers/md/dm-zoned-target.c 				   struct dm_zone *zone, struct bio *bio,
bio               252 drivers/md/dm-zoned-target.c 	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
bio               273 drivers/md/dm-zoned-target.c 				     struct dm_zone *zone, struct bio *bio,
bio               290 drivers/md/dm-zoned-target.c 	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
bio               309 drivers/md/dm-zoned-target.c 			    struct bio *bio)
bio               311 drivers/md/dm-zoned-target.c 	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
bio               312 drivers/md/dm-zoned-target.c 	unsigned int nr_blocks = dmz_bio_blocks(bio);
bio               318 drivers/md/dm-zoned-target.c 		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
bio               329 drivers/md/dm-zoned-target.c 		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
bio               336 drivers/md/dm-zoned-target.c 	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
bio               343 drivers/md/dm-zoned-target.c 			      struct bio *bio)
bio               346 drivers/md/dm-zoned-target.c 	sector_t block = dmz_bio_block(bio);
bio               347 drivers/md/dm-zoned-target.c 	unsigned int nr_blocks = dmz_bio_blocks(bio);
bio               359 drivers/md/dm-zoned-target.c 		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
bio               379 drivers/md/dm-zoned-target.c 			   struct bio *bio)
bio               381 drivers/md/dm-zoned-target.c 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
bio               390 drivers/md/dm-zoned-target.c 	if (bio_op(bio) == REQ_OP_WRITE)
bio               405 drivers/md/dm-zoned-target.c 	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
bio               406 drivers/md/dm-zoned-target.c 				     bio_op(bio));
bio               418 drivers/md/dm-zoned-target.c 	switch (bio_op(bio)) {
bio               420 drivers/md/dm-zoned-target.c 		ret = dmz_handle_read(dmz, zone, bio);
bio               423 drivers/md/dm-zoned-target.c 		ret = dmz_handle_write(dmz, zone, bio);
bio               427 drivers/md/dm-zoned-target.c 		ret = dmz_handle_discard(dmz, zone, bio);
bio               431 drivers/md/dm-zoned-target.c 			    bio_op(bio));
bio               442 drivers/md/dm-zoned-target.c 	dmz_bio_endio(bio, errno_to_blk_status(ret));
bio               475 drivers/md/dm-zoned-target.c 	struct bio *bio;
bio               480 drivers/md/dm-zoned-target.c 	while ((bio = bio_list_pop(&cw->bio_list))) {
bio               482 drivers/md/dm-zoned-target.c 		dmz_handle_bio(dmz, cw, bio);
bio               499 drivers/md/dm-zoned-target.c 	struct bio *bio;
bio               510 drivers/md/dm-zoned-target.c 		bio = bio_list_pop(&dmz->flush_list);
bio               513 drivers/md/dm-zoned-target.c 		if (!bio)
bio               516 drivers/md/dm-zoned-target.c 		dmz_bio_endio(bio, errno_to_blk_status(ret));
bio               526 drivers/md/dm-zoned-target.c static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio               528 drivers/md/dm-zoned-target.c 	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
bio               559 drivers/md/dm-zoned-target.c 	bio_list_add(&cw->bio_list, bio);
bio               618 drivers/md/dm-zoned-target.c static int dmz_map(struct dm_target *ti, struct bio *bio)
bio               622 drivers/md/dm-zoned-target.c 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
bio               623 drivers/md/dm-zoned-target.c 	sector_t sector = bio->bi_iter.bi_sector;
bio               624 drivers/md/dm-zoned-target.c 	unsigned int nr_sectors = bio_sectors(bio);
bio               632 drivers/md/dm-zoned-target.c 		      bio_op(bio), (unsigned long long)sector, nr_sectors,
bio               633 drivers/md/dm-zoned-target.c 		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
bio               634 drivers/md/dm-zoned-target.c 		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
bio               635 drivers/md/dm-zoned-target.c 		      (unsigned int)dmz_bio_blocks(bio));
bio               637 drivers/md/dm-zoned-target.c 	bio_set_dev(bio, dev->bdev);
bio               639 drivers/md/dm-zoned-target.c 	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
bio               649 drivers/md/dm-zoned-target.c 	bioctx->bio = bio;
bio               653 drivers/md/dm-zoned-target.c 	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
bio               655 drivers/md/dm-zoned-target.c 		bio_list_add(&dmz->flush_list, bio);
bio               664 drivers/md/dm-zoned-target.c 		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
bio               667 drivers/md/dm-zoned-target.c 	ret = dmz_queue_chunk_work(dmz, bio);
bio               671 drivers/md/dm-zoned-target.c 			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
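
dmz_map() clamps any bio that straddles a zone chunk with dm_accept_partial_bio() (dm-zoned-target.c:664): the target keeps only the leading sectors and dm core re-issues the remainder as a fresh bio. A sketch of the clamp; CHUNK_SECTORS is invented and assumed to be a power of two:

#include <linux/device-mapper.h>

#define CHUNK_SECTORS	(1U << 19)	/* assumed: 256 MiB chunks */

static void clamp_to_chunk(struct bio *bio)
{
	sector_t offset = bio->bi_iter.bi_sector & (CHUNK_SECTORS - 1);
	sector_t left = CHUNK_SECTORS - offset;

	/* keep only the part up to the boundary; dm re-issues the rest */
	if (bio_sectors(bio) > left)
		dm_accept_partial_bio(bio, left);
}
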
bio                45 drivers/md/dm-zoned.h #define dmz_bio_block(bio)	dmz_sect2blk((bio)->bi_iter.bi_sector)
bio                46 drivers/md/dm-zoned.h #define dmz_bio_blocks(bio)	dmz_sect2blk(bio_sectors(bio))
bio                69 drivers/md/dm-zoned.h #define dmz_bio_chunk(dev, bio)	((bio)->bi_iter.bi_sector >> \
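
The dm-zoned.h helpers above are shift arithmetic over 4 KiB blocks: one block is eight 512-byte sectors, so sector/block conversion is a 3-bit shift. A sketch with assumed constants in the style of DMZ_BLOCK_SHIFT:

#include <linux/blkdev.h>

#define MY_BLOCK_SHIFT		12				/* 4 KiB blocks */
#define MY_BLOCK_SECTORS_SHIFT	(MY_BLOCK_SHIFT - SECTOR_SHIFT)	/* = 3 */

#define my_sect2blk(sect)	((sector_t)(sect) >> MY_BLOCK_SECTORS_SHIFT)
#define my_blk2sect(blk)	((sector_t)(blk) << MY_BLOCK_SECTORS_SHIFT)
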
bio                67 drivers/md/dm.c 	struct bio *bio;
bio                84 drivers/md/dm.c 	struct bio clone;
bio                97 drivers/md/dm.c 	struct bio *orig_bio;
bio               105 drivers/md/dm.c void *dm_per_bio_data(struct bio *bio, size_t data_size)
bio               107 drivers/md/dm.c 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
bio               109 drivers/md/dm.c 		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
bio               110 drivers/md/dm.c 	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
bio               114 drivers/md/dm.c struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
bio               118 drivers/md/dm.c 		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
bio               120 drivers/md/dm.c 	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
bio               124 drivers/md/dm.c unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
bio               126 drivers/md/dm.c 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
bio               565 drivers/md/dm.c static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
bio               569 drivers/md/dm.c 	struct bio *clone;
bio               583 drivers/md/dm.c 	io->orig_bio = bio;
bio               606 drivers/md/dm.c 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
bio               654 drivers/md/dm.c 	struct bio *bio = io->orig_bio;
bio               658 drivers/md/dm.c 	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
bio               662 drivers/md/dm.c 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio               663 drivers/md/dm.c 				    bio->bi_iter.bi_sector, bio_sectors(bio),
bio               670 drivers/md/dm.c 	struct bio *bio = io->orig_bio;
bio               673 drivers/md/dm.c 	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
bio               677 drivers/md/dm.c 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio               678 drivers/md/dm.c 				    bio->bi_iter.bi_sector, bio_sectors(bio),
bio               689 drivers/md/dm.c static void queue_io(struct mapped_device *md, struct bio *bio)
bio               694 drivers/md/dm.c 	bio_list_add(&md->deferred, bio);
bio               898 drivers/md/dm.c 	struct bio *bio;
bio               925 drivers/md/dm.c 		bio = io->orig_bio;
bio               932 drivers/md/dm.c 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
bio               937 drivers/md/dm.c 			bio->bi_opf &= ~REQ_PREFLUSH;
bio               938 drivers/md/dm.c 			queue_io(md, bio);
bio               942 drivers/md/dm.c 				bio->bi_status = io_error;
bio               943 drivers/md/dm.c 			bio_endio(bio);
bio               973 drivers/md/dm.c static void clone_endio(struct bio *bio)
bio               975 drivers/md/dm.c 	blk_status_t error = bio->bi_status;
bio               976 drivers/md/dm.c 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
bio               982 drivers/md/dm.c 		if (bio_op(bio) == REQ_OP_DISCARD &&
bio               983 drivers/md/dm.c 		    !bio->bi_disk->queue->limits.max_discard_sectors)
bio               985 drivers/md/dm.c 		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
bio               986 drivers/md/dm.c 			 !bio->bi_disk->queue->limits.max_write_same_sectors)
bio               988 drivers/md/dm.c 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
bio               989 drivers/md/dm.c 			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
bio               994 drivers/md/dm.c 		int r = endio(tio->ti, bio, &error);
bio              1203 drivers/md/dm.c void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
bio              1205 drivers/md/dm.c 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
bio              1206 drivers/md/dm.c 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
bio              1207 drivers/md/dm.c 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
bio              1211 drivers/md/dm.c 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
bio              1267 drivers/md/dm.c 	struct bio *clone = &tio->clone;
bio              1312 drivers/md/dm.c static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bio              1314 drivers/md/dm.c 	bio->bi_iter.bi_sector = sector;
bio              1315 drivers/md/dm.c 	bio->bi_iter.bi_size = to_bytes(len);
bio              1321 drivers/md/dm.c static int clone_bio(struct dm_target_io *tio, struct bio *bio,
bio              1324 drivers/md/dm.c 	struct bio *clone = &tio->clone;
bio              1326 drivers/md/dm.c 	__bio_clone_fast(clone, bio);
bio              1328 drivers/md/dm.c 	if (bio_integrity(bio)) {
bio              1339 drivers/md/dm.c 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
bio              1347 drivers/md/dm.c 	if (bio_integrity(bio))
bio              1370 drivers/md/dm.c 		struct bio *bio;
bio              1386 drivers/md/dm.c 		while ((bio = bio_list_pop(blist))) {
bio              1387 drivers/md/dm.c 			tio = container_of(bio, struct dm_target_io, clone);
bio              1396 drivers/md/dm.c 	struct bio *clone = &tio->clone;
bio              1400 drivers/md/dm.c 	__bio_clone_fast(clone, ci->bio);
bio              1411 drivers/md/dm.c 	struct bio *bio;
bio              1416 drivers/md/dm.c 	while ((bio = bio_list_pop(&blist))) {
bio              1417 drivers/md/dm.c 		tio = container_of(bio, struct dm_target_io, clone);
bio              1434 drivers/md/dm.c 	bio_set_dev(ci->bio, ci->io->md->bdev);
bio              1436 drivers/md/dm.c 	BUG_ON(bio_has_data(ci->bio));
bio              1440 drivers/md/dm.c 	bio_disassociate_blkg(ci->bio);
bio              1448 drivers/md/dm.c 	struct bio *bio = ci->bio;
bio              1454 drivers/md/dm.c 	r = clone_bio(tio, bio, sector, *len);
bio              1530 drivers/md/dm.c static bool is_abnormal_io(struct bio *bio)
bio              1534 drivers/md/dm.c 	switch (bio_op(bio)) {
bio              1549 drivers/md/dm.c 	struct bio *bio = ci->bio;
bio              1551 drivers/md/dm.c 	if (bio_op(bio) == REQ_OP_DISCARD)
bio              1553 drivers/md/dm.c 	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
bio              1555 drivers/md/dm.c 	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
bio              1557 drivers/md/dm.c 	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
bio              1594 drivers/md/dm.c 			    struct dm_table *map, struct bio *bio)
bio              1597 drivers/md/dm.c 	ci->io = alloc_io(md, bio);
bio              1598 drivers/md/dm.c 	ci->sector = bio->bi_iter.bi_sector;
bio              1608 drivers/md/dm.c 					struct dm_table *map, struct bio *bio)
bio              1614 drivers/md/dm.c 	init_clone_info(&ci, md, map, bio);
bio              1616 drivers/md/dm.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio              1617 drivers/md/dm.c 		struct bio flush_bio;
bio              1626 drivers/md/dm.c 		ci.bio = &flush_bio;
bio              1630 drivers/md/dm.c 	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
bio              1631 drivers/md/dm.c 		ci.bio = bio;
bio              1635 drivers/md/dm.c 		ci.bio = bio;
bio              1636 drivers/md/dm.c 		ci.sector_count = bio_sectors(bio);
bio              1648 drivers/md/dm.c 				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
bio              1661 drivers/md/dm.c 						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
bio              1664 drivers/md/dm.c 				bio_chain(b, bio);
bio              1665 drivers/md/dm.c 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
bio              1666 drivers/md/dm.c 				ret = generic_make_request(bio);
bio              1682 drivers/md/dm.c 			      struct bio *bio, struct dm_target *ti)
bio              1688 drivers/md/dm.c 	init_clone_info(&ci, md, map, bio);
bio              1690 drivers/md/dm.c 	if (bio->bi_opf & REQ_PREFLUSH) {
bio              1691 drivers/md/dm.c 		struct bio flush_bio;
bio              1700 drivers/md/dm.c 		ci.bio = &flush_bio;
bio              1707 drivers/md/dm.c 		ci.bio = bio;
bio              1708 drivers/md/dm.c 		ci.sector_count = bio_sectors(bio);
bio              1721 drivers/md/dm.c static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
bio              1725 drivers/md/dm.c 	sector_count = bio_sectors(*bio);
bio              1726 drivers/md/dm.c 	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
bio              1729 drivers/md/dm.c 		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
bio              1731 drivers/md/dm.c 		bio_chain(split, *bio);
bio              1732 drivers/md/dm.c 		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
bio              1733 drivers/md/dm.c 		generic_make_request(*bio);
bio              1734 drivers/md/dm.c 		*bio = split;
bio              1739 drivers/md/dm.c 			       struct dm_table *map, struct bio *bio)
bio              1745 drivers/md/dm.c 		bio_io_error(bio);
bio              1750 drivers/md/dm.c 		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
bio              1752 drivers/md/dm.c 			bio_io_error(bio);
bio              1763 drivers/md/dm.c 		if (is_abnormal_io(bio))
bio              1764 drivers/md/dm.c 			blk_queue_split(md->queue, &bio);
bio              1766 drivers/md/dm.c 			dm_queue_split(md, ti, &bio);
bio              1770 drivers/md/dm.c 		return __process_bio(md, map, bio, ti);
bio              1772 drivers/md/dm.c 		return __split_and_process_bio(md, map, bio);
bio              1775 drivers/md/dm.c static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
bio              1788 drivers/md/dm.c 		if (!(bio->bi_opf & REQ_RAHEAD))
bio              1789 drivers/md/dm.c 			queue_io(md, bio);
bio              1791 drivers/md/dm.c 			bio_io_error(bio);
bio              1795 drivers/md/dm.c 	ret = dm_process_bio(md, map, bio);
bio              2463 drivers/md/dm.c 	struct bio *c;
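
dm_per_bio_data() and dm_bio_from_per_bio_data() above (dm.c:105-120) are inverse pointer arithmetic: the target's per-bio data is laid out immediately in front of the embedded clone bio, so the two convert with offsetof() and a byte offset. A simplified sketch of that layout; my_tio stands in for dm's dm_target_io and ignores the inside-dm_io case:

#include <linux/bio.h>

struct my_tio {
	unsigned int	magic;
	struct bio	clone;		/* the bio handed to the target */
};

static inline void *my_per_bio_data(struct bio *bio, size_t data_size)
{
	/* per-bio data sits data_size bytes before the containing my_tio */
	return (char *)container_of(bio, struct my_tio, clone) - data_size;
}

static inline struct bio *my_bio_from_data(void *data, size_t data_size)
{
	return &((struct my_tio *)((char *)data + data_size))->clone;
}
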
bio                64 drivers/md/md-faulty.c static void faulty_fail(struct bio *bio)
bio                66 drivers/md/md-faulty.c 	struct bio *b = bio->bi_private;
bio                68 drivers/md/md-faulty.c 	b->bi_iter.bi_size = bio->bi_iter.bi_size;
bio                69 drivers/md/md-faulty.c 	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio                71 drivers/md/md-faulty.c 	bio_put(bio);
bio               164 drivers/md/md-faulty.c static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
bio               169 drivers/md/md-faulty.c 	if (bio_data_dir(bio) == WRITE) {
bio               175 drivers/md/md-faulty.c 			bio_io_error(bio);
bio               179 drivers/md/md-faulty.c 		if (check_sector(conf, bio->bi_iter.bi_sector,
bio               180 drivers/md/md-faulty.c 				 bio_end_sector(bio), WRITE))
bio               183 drivers/md/md-faulty.c 			add_sector(conf, bio->bi_iter.bi_sector,
bio               191 drivers/md/md-faulty.c 		if (check_sector(conf, bio->bi_iter.bi_sector,
bio               192 drivers/md/md-faulty.c 				 bio_end_sector(bio), READ))
bio               197 drivers/md/md-faulty.c 			add_sector(conf, bio->bi_iter.bi_sector,
bio               202 drivers/md/md-faulty.c 			add_sector(conf, bio->bi_iter.bi_sector,
bio               208 drivers/md/md-faulty.c 		struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio               211 drivers/md/md-faulty.c 		b->bi_private = bio;
bio               213 drivers/md/md-faulty.c 		bio = b;
bio               215 drivers/md/md-faulty.c 		bio_set_dev(bio, conf->rdev->bdev);
bio               217 drivers/md/md-faulty.c 	generic_make_request(bio);
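
md-faulty submits a clone of the caller's bio with its own completion, keeping the master in bi_private (md-faulty.c:208-215), so faults can be injected on the way back. A sketch of that clone-and-intercept pattern; my_bio_set is assumed to be an initialised bio_set:

#include <linux/bio.h>

static struct bio_set my_bio_set;	/* assumed initialised elsewhere */

static void my_clone_endio(struct bio *clone)
{
	struct bio *master = clone->bi_private;

	master->bi_status = clone->bi_status;	/* propagate the result */
	bio_put(clone);
	bio_endio(master);
}

static void submit_intercepted(struct bio *bio, struct block_device *bdev)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);

	clone->bi_private = bio;		/* remember the master */
	clone->bi_end_io = my_clone_endio;
	bio_set_dev(clone, bdev);
	generic_make_request(clone);
}
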
bio               240 drivers/md/md-linear.c static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio               245 drivers/md/md-linear.c 	sector_t bio_sector = bio->bi_iter.bi_sector;
bio               247 drivers/md/md-linear.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
bio               248 drivers/md/md-linear.c 	    && md_flush_request(mddev, bio))
bio               261 drivers/md/md-linear.c 		bio_io_error(bio);
bio               265 drivers/md/md-linear.c 	if (unlikely(bio_end_sector(bio) > end_sector)) {
bio               267 drivers/md/md-linear.c 		struct bio *split = bio_split(bio, end_sector - bio_sector,
bio               269 drivers/md/md-linear.c 		bio_chain(split, bio);
bio               270 drivers/md/md-linear.c 		generic_make_request(bio);
bio               271 drivers/md/md-linear.c 		bio = split;
bio               274 drivers/md/md-linear.c 	bio_set_dev(bio, tmp_dev->rdev->bdev);
bio               275 drivers/md/md-linear.c 	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
bio               278 drivers/md/md-linear.c 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
bio               279 drivers/md/md-linear.c 		     !blk_queue_discard(bio->bi_disk->queue))) {
bio               281 drivers/md/md-linear.c 		bio_endio(bio);
bio               284 drivers/md/md-linear.c 			trace_block_bio_remap(bio->bi_disk->queue,
bio               285 drivers/md/md-linear.c 					      bio, disk_devt(mddev->gendisk),
bio               287 drivers/md/md-linear.c 		mddev_check_writesame(mddev, bio);
bio               288 drivers/md/md-linear.c 		mddev_check_write_zeroes(mddev, bio);
bio               289 drivers/md/md-linear.c 		generic_make_request(bio);
bio               296 drivers/md/md-linear.c 	       (unsigned long long)bio->bi_iter.bi_sector,
bio               300 drivers/md/md-linear.c 	bio_io_error(bio);
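
linear_make_request() above shows the standard split-at-boundary idiom (md-linear.c:265-271): carve off the part that fits with bio_split(), bio_chain() it to the remainder so completions are linked, resubmit the remainder, and keep handling the front piece. A sketch of just that step:

#include <linux/bio.h>

static struct bio *split_at(struct bio *bio, sector_t end_sector,
			    struct bio_set *bs)
{
	if (bio_end_sector(bio) > end_sector) {
		struct bio *front = bio_split(bio,
				end_sector - bio->bi_iter.bi_sector,
				GFP_NOIO, bs);

		bio_chain(front, bio);		/* front completes into bio */
		generic_make_request(bio);	/* requeue the tail */
		bio = front;
	}
	return bio;	/* now guaranteed to end at or before end_sector */
}
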
bio                70 drivers/md/md-multipath.c 	struct bio *bio = mp_bh->master_bio;
bio                73 drivers/md/md-multipath.c 	bio->bi_status = status;
bio                74 drivers/md/md-multipath.c 	bio_endio(bio);
bio                78 drivers/md/md-multipath.c static void multipath_end_request(struct bio *bio)
bio                80 drivers/md/md-multipath.c 	struct multipath_bh *mp_bh = bio->bi_private;
bio                84 drivers/md/md-multipath.c 	if (!bio->bi_status)
bio                86 drivers/md/md-multipath.c 	else if (!(bio->bi_opf & REQ_RAHEAD)) {
bio                94 drivers/md/md-multipath.c 			(unsigned long long)bio->bi_iter.bi_sector);
bio                97 drivers/md/md-multipath.c 		multipath_end_bh_io(mp_bh, bio->bi_status);
bio               101 drivers/md/md-multipath.c static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
bio               107 drivers/md/md-multipath.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
bio               108 drivers/md/md-multipath.c 	    && md_flush_request(mddev, bio))
bio               113 drivers/md/md-multipath.c 	mp_bh->master_bio = bio;
bio               118 drivers/md/md-multipath.c 		bio_io_error(bio);
bio               124 drivers/md/md-multipath.c 	bio_init(&mp_bh->bio, NULL, 0);
bio               125 drivers/md/md-multipath.c 	__bio_clone_fast(&mp_bh->bio, bio);
bio               127 drivers/md/md-multipath.c 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
bio               128 drivers/md/md-multipath.c 	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
bio               129 drivers/md/md-multipath.c 	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
bio               130 drivers/md/md-multipath.c 	mp_bh->bio.bi_end_io = multipath_end_request;
bio               131 drivers/md/md-multipath.c 	mp_bh->bio.bi_private = mp_bh;
bio               132 drivers/md/md-multipath.c 	mddev_check_writesame(mddev, &mp_bh->bio);
bio               133 drivers/md/md-multipath.c 	mddev_check_write_zeroes(mddev, &mp_bh->bio);
bio               134 drivers/md/md-multipath.c 	generic_make_request(&mp_bh->bio);
bio               317 drivers/md/md-multipath.c 	struct bio *bio;
bio               332 drivers/md/md-multipath.c 		bio = &mp_bh->bio;
bio               333 drivers/md/md-multipath.c 		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
bio               337 drivers/md/md-multipath.c 			       bio_devname(bio, b),
bio               338 drivers/md/md-multipath.c 			       (unsigned long long)bio->bi_iter.bi_sector);
bio               342 drivers/md/md-multipath.c 			       bio_devname(bio, b),
bio               343 drivers/md/md-multipath.c 			       (unsigned long long)bio->bi_iter.bi_sector);
bio               344 drivers/md/md-multipath.c 			*bio = *(mp_bh->master_bio);
bio               345 drivers/md/md-multipath.c 			bio->bi_iter.bi_sector +=
bio               347 drivers/md/md-multipath.c 			bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
bio               348 drivers/md/md-multipath.c 			bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio               349 drivers/md/md-multipath.c 			bio->bi_end_io = multipath_end_request;
bio               350 drivers/md/md-multipath.c 			bio->bi_private = mp_bh;
bio               351 drivers/md/md-multipath.c 			generic_make_request(bio);
bio                27 drivers/md/md-multipath.h 	struct bio		*master_bio;
bio                28 drivers/md/md-multipath.h 	struct bio		bio;
bio               253 drivers/md/md.c struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
bio               263 drivers/md/md.c static struct bio *md_bio_alloc_sync(struct mddev *mddev)
bio               326 drivers/md/md.c static bool is_suspended(struct mddev *mddev, struct bio *bio)
bio               330 drivers/md/md.c 	if (bio_data_dir(bio) != WRITE)
bio               334 drivers/md/md.c 	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
bio               336 drivers/md/md.c 	if (bio_end_sector(bio) < mddev->suspend_lo)
bio               341 drivers/md/md.c void md_handle_request(struct mddev *mddev, struct bio *bio)
bio               345 drivers/md/md.c 	if (is_suspended(mddev, bio)) {
bio               350 drivers/md/md.c 			if (!is_suspended(mddev, bio))
bio               361 drivers/md/md.c 	if (!mddev->pers->make_request(mddev, bio)) {
bio               372 drivers/md/md.c static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
bio               374 drivers/md/md.c 	const int rw = bio_data_dir(bio);
bio               375 drivers/md/md.c 	const int sgrp = op_stat_group(bio_op(bio));
bio               380 drivers/md/md.c 		bio_io_error(bio);
bio               384 drivers/md/md.c 	blk_queue_split(q, &bio);
bio               387 drivers/md/md.c 		bio_io_error(bio);
bio               391 drivers/md/md.c 		if (bio_sectors(bio) != 0)
bio               392 drivers/md/md.c 			bio->bi_status = BLK_STS_IOERR;
bio               393 drivers/md/md.c 		bio_endio(bio);
bio               401 drivers/md/md.c 	sectors = bio_sectors(bio);
bio               403 drivers/md/md.c 	bio->bi_opf &= ~REQ_NOMERGE;
bio               405 drivers/md/md.c 	md_handle_request(mddev, bio);
bio               478 drivers/md/md.c static void md_end_flush(struct bio *bio)
bio               480 drivers/md/md.c 	struct md_rdev *rdev = bio->bi_private;
bio               489 drivers/md/md.c 	bio_put(bio);
bio               510 drivers/md/md.c 			struct bio *bi;
bio               532 drivers/md/md.c 	struct bio *bio = mddev->flush_bio;
bio               544 drivers/md/md.c 	if (bio->bi_iter.bi_size == 0) {
bio               546 drivers/md/md.c 		bio_endio(bio);
bio               548 drivers/md/md.c 		bio->bi_opf &= ~REQ_PREFLUSH;
bio               549 drivers/md/md.c 		md_handle_request(mddev, bio);
bio               559 drivers/md/md.c bool md_flush_request(struct mddev *mddev, struct bio *bio)
bio               569 drivers/md/md.c 		mddev->flush_bio = bio;
bio               570 drivers/md/md.c 		bio = NULL;
bio               574 drivers/md/md.c 	if (!bio) {
bio               579 drivers/md/md.c 		if (bio->bi_iter.bi_size == 0)
bio               581 drivers/md/md.c 			bio_endio(bio);
bio               583 drivers/md/md.c 			bio->bi_opf &= ~REQ_PREFLUSH;
bio               847 drivers/md/md.c static void super_written(struct bio *bio)
bio               849 drivers/md/md.c 	struct md_rdev *rdev = bio->bi_private;
bio               852 drivers/md/md.c 	if (bio->bi_status) {
bio               853 drivers/md/md.c 		pr_err("md: super_written gets error=%d\n", bio->bi_status);
bio               856 drivers/md/md.c 		    && (bio->bi_opf & MD_FAILFAST)) {
bio               866 drivers/md/md.c 	bio_put(bio);
bio               878 drivers/md/md.c 	struct bio *bio;
bio               887 drivers/md/md.c 	bio = md_bio_alloc_sync(mddev);
bio               891 drivers/md/md.c 	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio               892 drivers/md/md.c 	bio->bi_iter.bi_sector = sector;
bio               893 drivers/md/md.c 	bio_add_page(bio, page, size, 0);
bio               894 drivers/md/md.c 	bio->bi_private = rdev;
bio               895 drivers/md/md.c 	bio->bi_end_io = super_written;
bio               901 drivers/md/md.c 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
bio               904 drivers/md/md.c 	submit_bio(bio);
bio               919 drivers/md/md.c 	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
bio               923 drivers/md/md.c 		bio_set_dev(bio, rdev->meta_bdev);
bio               925 drivers/md/md.c 		bio_set_dev(bio, rdev->bdev);
bio               926 drivers/md/md.c 	bio_set_op_attrs(bio, op, op_flags);
bio               928 drivers/md/md.c 		bio->bi_iter.bi_sector = sector + rdev->sb_start;
bio               932 drivers/md/md.c 		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
bio               934 drivers/md/md.c 		bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio               935 drivers/md/md.c 	bio_add_page(bio, page, size, 0);
bio               937 drivers/md/md.c 	submit_bio_wait(bio);
bio               939 drivers/md/md.c 	ret = !bio->bi_status;
bio               940 drivers/md/md.c 	bio_put(bio);
bio              8281 drivers/md/md.c bool md_write_start(struct mddev *mddev, struct bio *bi)
bio              8338 drivers/md/md.c void md_write_inc(struct mddev *mddev, struct bio *bi)
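
The tail of md_flush_request()/md_submit_flush_data() above shows the PREFLUSH convention: once the flush itself has gone to the members, an empty flush bio is completed outright, while a flush-plus-data bio is resubmitted with REQ_PREFLUSH cleared. A sketch of that final step; the handler is passed in here rather than being md_handle_request():

#include <linux/bio.h>

static void finish_flush(struct bio *bio, void (*handle)(struct bio *))
{
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);			/* pure flush: all done */
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;	/* flush done; submit the data */
		handle(bio);
	}
}
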
bio               483 drivers/md/md.h 	struct bio *flush_bio;
bio               542 drivers/md/md.h static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
bio               544 drivers/md/md.h 	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
bio               553 drivers/md/md.h 	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
bio               698 drivers/md/md.h extern bool md_write_start(struct mddev *mddev, struct bio *bi);
bio               699 drivers/md/md.h extern void md_write_inc(struct mddev *mddev, struct bio *bi);
bio               706 drivers/md/md.h extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
bio               731 drivers/md/md.h extern void md_handle_request(struct mddev *mddev, struct bio *bio);
bio               734 drivers/md/md.h extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
bio               780 drivers/md/md.h static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
bio               782 drivers/md/md.h 	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
bio               783 drivers/md/md.h 	    !bio->bi_disk->queue->limits.max_write_same_sectors)
bio               787 drivers/md/md.h static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
bio               789 drivers/md/md.h 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
bio               790 drivers/md/md.h 	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
bio               464 drivers/md/raid0.c 			unsigned int chunk_sects, struct bio *bio)
bio               468 drivers/md/raid0.c 			((bio->bi_iter.bi_sector & (chunk_sects-1))
bio               469 drivers/md/raid0.c 					+ bio_sectors(bio));
bio               471 drivers/md/raid0.c 		sector_t sector = bio->bi_iter.bi_sector;
bio               473 drivers/md/raid0.c 						+ bio_sectors(bio));
bio               477 drivers/md/raid0.c static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
bio               481 drivers/md/raid0.c 	sector_t start = bio->bi_iter.bi_sector;
bio               493 drivers/md/raid0.c 	if (bio_end_sector(bio) > zone->zone_end) {
bio               494 drivers/md/raid0.c 		struct bio *split = bio_split(bio,
bio               495 drivers/md/raid0.c 			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
bio               497 drivers/md/raid0.c 		bio_chain(split, bio);
bio               498 drivers/md/raid0.c 		generic_make_request(bio);
bio               499 drivers/md/raid0.c 		bio = split;
bio               502 drivers/md/raid0.c 		end = bio_end_sector(bio);
bio               528 drivers/md/raid0.c 		struct bio *discard_bio = NULL;
bio               556 drivers/md/raid0.c 		bio_chain(discard_bio, bio);
bio               557 drivers/md/raid0.c 		bio_clone_blkg_association(discard_bio, bio);
bio               561 drivers/md/raid0.c 				bio->bi_iter.bi_sector);
bio               564 drivers/md/raid0.c 	bio_endio(bio);
bio               567 drivers/md/raid0.c static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
bio               578 drivers/md/raid0.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
bio               579 drivers/md/raid0.c 	    && md_flush_request(mddev, bio))
bio               582 drivers/md/raid0.c 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
bio               583 drivers/md/raid0.c 		raid0_handle_discard(mddev, bio);
bio               587 drivers/md/raid0.c 	bio_sector = bio->bi_iter.bi_sector;
bio               599 drivers/md/raid0.c 	if (sectors < bio_sectors(bio)) {
bio               600 drivers/md/raid0.c 		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
bio               602 drivers/md/raid0.c 		bio_chain(split, bio);
bio               603 drivers/md/raid0.c 		generic_make_request(bio);
bio               604 drivers/md/raid0.c 		bio = split;
bio               618 drivers/md/raid0.c 		bio_io_error(bio);
bio               623 drivers/md/raid0.c 		bio_io_error(bio);
bio               627 drivers/md/raid0.c 	bio_set_dev(bio, tmp_dev->bdev);
bio               628 drivers/md/raid0.c 	bio->bi_iter.bi_sector = sector + zone->dev_start +
bio               632 drivers/md/raid0.c 		trace_block_bio_remap(bio->bi_disk->queue, bio,
bio               634 drivers/md/raid0.c 	mddev_check_writesame(mddev, bio);
bio               635 drivers/md/raid0.c 	mddev_check_write_zeroes(mddev, bio);
bio               636 drivers/md/raid0.c 	generic_make_request(bio);
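
Both raid0 paths above rely on the same splitting idiom: when a bio crosses a zone or chunk boundary, bio_split() carves off the serviceable head, bio_chain() makes the original bio's completion wait on the split, and the remainder goes back through generic_make_request(). One step of that loop, sketched with illustrative names:

	#include <linux/bio.h>

	/* illustrative: the split/chain/requeue step used by raid0 above */
	static struct bio *example_split_for_limit(struct bio *bio,
						   unsigned int sectors,
						   struct bio_set *bs)
	{
		if (sectors < bio_sectors(bio)) {
			struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

			bio_chain(split, bio);		/* bio completes after split */
			generic_make_request(bio);	/* requeue the tail */
			bio = split;			/* carry on with the head */
		}
		return bio;
	}
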
bio                16 drivers/md/raid1-10.c #define IO_BLOCKED ((struct bio *)1)
bio                21 drivers/md/raid1-10.c #define IO_MADE_GOOD ((struct bio *)2)
bio                23 drivers/md/raid1-10.c #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
bio                89 drivers/md/raid1-10.c static inline struct resync_pages *get_resync_pages(struct bio *bio)
bio                91 drivers/md/raid1-10.c 	return bio->bi_private;
bio                95 drivers/md/raid1-10.c static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
bio               109 drivers/md/raid1-10.c 		bio_add_page(bio, page, len, 0);
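
raid1-10.c also defines the sentinel values used throughout raid1/raid10: a per-device bio slot may hold IO_BLOCKED or IO_MADE_GOOD instead of a real pointer, and BIO_SPECIAL() (which also matches NULL, since 0 <= 2) must gate every bio_put(). A sketch of safe slot teardown using those macros:

	/* illustrative: mirrors the free_r1bio()/free_r10bio() loops below */
	static void example_put_slot(struct bio **slot)
	{
		if (!BIO_SPECIAL(*slot))	/* skips NULL, IO_BLOCKED, IO_MADE_GOOD */
			bio_put(*slot);
		*slot = NULL;
	}
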
bio               108 drivers/md/raid1.c static inline struct r1bio *get_resync_r1bio(struct bio *bio)
bio               110 drivers/md/raid1.c 	return get_resync_pages(bio)->raid_bio;
bio               133 drivers/md/raid1.c 	struct bio *bio;
bio               151 drivers/md/raid1.c 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
bio               152 drivers/md/raid1.c 		if (!bio)
bio               154 drivers/md/raid1.c 		r1_bio->bios[j] = bio;
bio               169 drivers/md/raid1.c 		bio = r1_bio->bios[j];
bio               180 drivers/md/raid1.c 		bio->bi_private = rp;
bio               225 drivers/md/raid1.c 		struct bio **bio = r1_bio->bios + i;
bio               226 drivers/md/raid1.c 		if (!BIO_SPECIAL(*bio))
bio               227 drivers/md/raid1.c 			bio_put(*bio);
bio               228 drivers/md/raid1.c 		*bio = NULL;
bio               247 drivers/md/raid1.c 		struct bio *bio = r1_bio->bios[i];
bio               248 drivers/md/raid1.c 		if (bio->bi_end_io)
bio               281 drivers/md/raid1.c 	struct bio *bio = r1_bio->master_bio;
bio               285 drivers/md/raid1.c 		bio->bi_status = BLK_STS_IOERR;
bio               287 drivers/md/raid1.c 	bio_endio(bio);
bio               297 drivers/md/raid1.c 	struct bio *bio = r1_bio->master_bio;
bio               302 drivers/md/raid1.c 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
bio               303 drivers/md/raid1.c 			 (unsigned long long) bio->bi_iter.bi_sector,
bio               304 drivers/md/raid1.c 			 (unsigned long long) bio_end_sector(bio) - 1);
bio               325 drivers/md/raid1.c static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
bio               332 drivers/md/raid1.c 		if (r1_bio->bios[mirror] == bio)
bio               341 drivers/md/raid1.c static void raid1_end_read_request(struct bio *bio)
bio               343 drivers/md/raid1.c 	int uptodate = !bio->bi_status;
bio               344 drivers/md/raid1.c 	struct r1bio *r1_bio = bio->bi_private;
bio               424 drivers/md/raid1.c static void raid1_end_write_request(struct bio *bio)
bio               426 drivers/md/raid1.c 	struct r1bio *r1_bio = bio->bi_private;
bio               429 drivers/md/raid1.c 	struct bio *to_put = NULL;
bio               430 drivers/md/raid1.c 	int mirror = find_bio_disk(r1_bio, bio);
bio               434 drivers/md/raid1.c 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
bio               439 drivers/md/raid1.c 	if (bio->bi_status && !discard_error) {
bio               446 drivers/md/raid1.c 		    (bio->bi_opf & MD_FAILFAST) &&
bio               463 drivers/md/raid1.c 			to_put = bio;
bio               480 drivers/md/raid1.c 		to_put = bio;
bio               522 drivers/md/raid1.c 				struct bio *mbio = r1_bio->master_bio;
bio               802 drivers/md/raid1.c static void flush_bio_list(struct r1conf *conf, struct bio *bio)
bio               808 drivers/md/raid1.c 	while (bio) { /* submit pending writes */
bio               809 drivers/md/raid1.c 		struct bio *next = bio->bi_next;
bio               810 drivers/md/raid1.c 		struct md_rdev *rdev = (void *)bio->bi_disk;
bio               811 drivers/md/raid1.c 		bio->bi_next = NULL;
bio               812 drivers/md/raid1.c 		bio_set_dev(bio, rdev->bdev);
bio               814 drivers/md/raid1.c 			bio_io_error(bio);
bio               815 drivers/md/raid1.c 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
bio               816 drivers/md/raid1.c 				    !blk_queue_discard(bio->bi_disk->queue)))
bio               818 drivers/md/raid1.c 			bio_endio(bio);
bio               820 drivers/md/raid1.c 			generic_make_request(bio);
bio               821 drivers/md/raid1.c 		bio = next;
bio               834 drivers/md/raid1.c 		struct bio *bio;
bio               836 drivers/md/raid1.c 		bio = bio_list_get(&conf->pending_bio_list);
bio               851 drivers/md/raid1.c 		flush_bio_list(conf, bio);
bio              1112 drivers/md/raid1.c 					   struct bio *bio)
bio              1114 drivers/md/raid1.c 	int size = bio->bi_iter.bi_size;
bio              1117 drivers/md/raid1.c 	struct bio *behind_bio = NULL;
bio              1124 drivers/md/raid1.c 	if (!bio_has_data(bio)) {
bio              1129 drivers/md/raid1.c 	behind_bio->bi_write_hint = bio->bi_write_hint;
bio              1145 drivers/md/raid1.c 	bio_copy_data(behind_bio, bio);
bio              1154 drivers/md/raid1.c 		 bio->bi_iter.bi_size);
bio              1171 drivers/md/raid1.c 	struct bio *bio;
bio              1185 drivers/md/raid1.c 	bio = bio_list_get(&plug->pending);
bio              1186 drivers/md/raid1.c 	flush_bio_list(conf, bio);
bio              1190 drivers/md/raid1.c static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
bio              1192 drivers/md/raid1.c 	r1_bio->master_bio = bio;
bio              1193 drivers/md/raid1.c 	r1_bio->sectors = bio_sectors(bio);
bio              1196 drivers/md/raid1.c 	r1_bio->sector = bio->bi_iter.bi_sector;
bio              1200 drivers/md/raid1.c alloc_r1bio(struct mddev *mddev, struct bio *bio)
bio              1208 drivers/md/raid1.c 	init_r1bio(r1_bio, mddev, bio);
bio              1212 drivers/md/raid1.c static void raid1_read_request(struct mddev *mddev, struct bio *bio,
bio              1217 drivers/md/raid1.c 	struct bio *read_bio;
bio              1219 drivers/md/raid1.c 	const int op = bio_op(bio);
bio              1220 drivers/md/raid1.c 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
bio              1249 drivers/md/raid1.c 	wait_read_barrier(conf, bio->bi_iter.bi_sector);
bio              1252 drivers/md/raid1.c 		r1_bio = alloc_r1bio(mddev, bio);
bio              1254 drivers/md/raid1.c 		init_r1bio(r1_bio, mddev, bio);
bio              1293 drivers/md/raid1.c 	if (max_sectors < bio_sectors(bio)) {
bio              1294 drivers/md/raid1.c 		struct bio *split = bio_split(bio, max_sectors,
bio              1296 drivers/md/raid1.c 		bio_chain(split, bio);
bio              1297 drivers/md/raid1.c 		generic_make_request(bio);
bio              1298 drivers/md/raid1.c 		bio = split;
bio              1299 drivers/md/raid1.c 		r1_bio->master_bio = bio;
bio              1305 drivers/md/raid1.c 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
bio              1326 drivers/md/raid1.c static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio              1342 drivers/md/raid1.c 		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
bio              1349 drivers/md/raid1.c 							bio->bi_iter.bi_sector,
bio              1350 drivers/md/raid1.c 							bio_end_sector(bio)))
bio              1362 drivers/md/raid1.c 	wait_barrier(conf, bio->bi_iter.bi_sector);
bio              1364 drivers/md/raid1.c 	r1_bio = alloc_r1bio(mddev, bio);
bio              1445 drivers/md/raid1.c 		r1_bio->bios[i] = bio;
bio              1457 drivers/md/raid1.c 		allow_barrier(conf, bio->bi_iter.bi_sector);
bio              1460 drivers/md/raid1.c 		wait_barrier(conf, bio->bi_iter.bi_sector);
bio              1464 drivers/md/raid1.c 	if (max_sectors < bio_sectors(bio)) {
bio              1465 drivers/md/raid1.c 		struct bio *split = bio_split(bio, max_sectors,
bio              1467 drivers/md/raid1.c 		bio_chain(split, bio);
bio              1468 drivers/md/raid1.c 		generic_make_request(bio);
bio              1469 drivers/md/raid1.c 		bio = split;
bio              1470 drivers/md/raid1.c 		r1_bio->master_bio = bio;
bio              1480 drivers/md/raid1.c 		struct bio *mbio = NULL;
bio              1493 drivers/md/raid1.c 				alloc_behind_master_bio(r1_bio, bio);
bio              1505 drivers/md/raid1.c 			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio              1527 drivers/md/raid1.c 		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
bio              1566 drivers/md/raid1.c static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
bio              1570 drivers/md/raid1.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
bio              1571 drivers/md/raid1.c 	    && md_flush_request(mddev, bio))
bio              1582 drivers/md/raid1.c 		bio->bi_iter.bi_sector, bio_sectors(bio));
bio              1584 drivers/md/raid1.c 	if (bio_data_dir(bio) == READ)
bio              1585 drivers/md/raid1.c 		raid1_read_request(mddev, bio, sectors, NULL);
bio              1587 drivers/md/raid1.c 		if (!md_write_start(mddev, bio))
bio              1589 drivers/md/raid1.c 		raid1_write_request(mddev, bio, sectors);
bio              1876 drivers/md/raid1.c static void end_sync_read(struct bio *bio)
bio              1878 drivers/md/raid1.c 	struct r1bio *r1_bio = get_resync_r1bio(bio);
bio              1887 drivers/md/raid1.c 	if (!bio->bi_status)
bio              1924 drivers/md/raid1.c static void end_sync_write(struct bio *bio)
bio              1926 drivers/md/raid1.c 	int uptodate = !bio->bi_status;
bio              1927 drivers/md/raid1.c 	struct r1bio *r1_bio = get_resync_r1bio(bio);
bio              1932 drivers/md/raid1.c 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
bio              1987 drivers/md/raid1.c 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
bio              1988 drivers/md/raid1.c 	struct page **pages = get_resync_pages(bio)->pages;
bio              2003 drivers/md/raid1.c 			bio->bi_end_io = end_sync_write;
bio              2042 drivers/md/raid1.c 					    mdname(mddev), bio_devname(bio, b),
bio              2100 drivers/md/raid1.c 	bio->bi_status = 0;
bio              2123 drivers/md/raid1.c 		struct bio *b = r1_bio->bios[i];
bio              2151 drivers/md/raid1.c 		struct bio *pbio = r1_bio->bios[primary];
bio              2152 drivers/md/raid1.c 		struct bio *sbio = r1_bio->bios[i];
bio              2196 drivers/md/raid1.c 	struct bio *wbio;
bio              2374 drivers/md/raid1.c 		struct bio *wbio;
bio              2416 drivers/md/raid1.c 		struct bio *bio = r1_bio->bios[m];
bio              2417 drivers/md/raid1.c 		if (bio->bi_end_io == NULL)
bio              2419 drivers/md/raid1.c 		if (!bio->bi_status &&
bio              2423 drivers/md/raid1.c 		if (bio->bi_status &&
bio              2482 drivers/md/raid1.c 	struct bio *bio;
bio              2495 drivers/md/raid1.c 	bio = r1_bio->bios[r1_bio->read_disk];
bio              2496 drivers/md/raid1.c 	bio_put(bio);
bio              2514 drivers/md/raid1.c 	bio = r1_bio->master_bio;
bio              2518 drivers/md/raid1.c 	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
bio              2608 drivers/md/raid1.c 	struct bio *bio;
bio              2612 drivers/md/raid1.c 		bio = r1bio->bios[i];
bio              2613 drivers/md/raid1.c 		rps = bio->bi_private;
bio              2614 drivers/md/raid1.c 		bio_reset(bio);
bio              2615 drivers/md/raid1.c 		bio->bi_private = rps;
bio              2636 drivers/md/raid1.c 	struct bio *bio;
bio              2732 drivers/md/raid1.c 		bio = r1_bio->bios[i];
bio              2740 drivers/md/raid1.c 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio              2741 drivers/md/raid1.c 			bio->bi_end_io = end_sync_write;
bio              2767 drivers/md/raid1.c 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio              2768 drivers/md/raid1.c 				bio->bi_end_io = end_sync_read;
bio              2779 drivers/md/raid1.c 				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio              2780 drivers/md/raid1.c 				bio->bi_end_io = end_sync_write;
bio              2784 drivers/md/raid1.c 		if (rdev && bio->bi_end_io) {
bio              2786 drivers/md/raid1.c 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio              2787 drivers/md/raid1.c 			bio_set_dev(bio, rdev->bdev);
bio              2789 drivers/md/raid1.c 				bio->bi_opf |= MD_FAILFAST;
bio              2875 drivers/md/raid1.c 			bio = r1_bio->bios[i];
bio              2876 drivers/md/raid1.c 			rp = get_resync_pages(bio);
bio              2877 drivers/md/raid1.c 			if (bio->bi_end_io) {
bio              2884 drivers/md/raid1.c 				bio_add_page(bio, page, len, 0);
bio              2910 drivers/md/raid1.c 			bio = r1_bio->bios[i];
bio              2911 drivers/md/raid1.c 			if (bio->bi_end_io == end_sync_read) {
bio              2913 drivers/md/raid1.c 				md_sync_acct_bio(bio, nr_sectors);
bio              2915 drivers/md/raid1.c 					bio->bi_opf &= ~MD_FAILFAST;
bio              2916 drivers/md/raid1.c 				generic_make_request(bio);
bio              2921 drivers/md/raid1.c 		bio = r1_bio->bios[r1_bio->read_disk];
bio              2922 drivers/md/raid1.c 		md_sync_acct_bio(bio, nr_sectors);
bio              2924 drivers/md/raid1.c 			bio->bi_opf &= ~MD_FAILFAST;
bio              2925 drivers/md/raid1.c 		generic_make_request(bio);
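
The raid1 completion handlers above all take one shape: recover the per-request context from bi_private (or via get_resync_pages() for resync bios), record bi_status, and complete the parent once every sub-bio has finished. A minimal sketch of that shape; the context struct here is illustrative, not the kernel's r1bio:

	#include <linux/atomic.h>
	#include <linux/bio.h>
	#include <linux/blk_types.h>

	struct example_ctx {
		atomic_t	remaining;	/* outstanding sub-bios */
		blk_status_t	status;		/* first error seen */
	};

	static void example_end_io(struct bio *bio)
	{
		struct example_ctx *ctx = bio->bi_private;

		if (bio->bi_status && !ctx->status)
			ctx->status = bio->bi_status;
		bio_put(bio);
		if (atomic_dec_and_test(&ctx->remaining))
			; /* all sub-bios done: complete the master bio here */
	}
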
bio               165 drivers/md/raid1.h 	struct bio		*master_bio;
bio               177 drivers/md/raid1.h 	struct bio		*behind_master_bio;
bio               183 drivers/md/raid1.h 	struct bio		*bios[0];
bio                74 drivers/md/raid10.c static void end_reshape_write(struct bio *bio);
bio                86 drivers/md/raid10.c static inline struct r10bio *get_resync_r10bio(struct bio *bio)
bio                88 drivers/md/raid10.c 	return get_resync_pages(bio)->raid_bio;
bio               120 drivers/md/raid10.c 	struct bio *bio;
bio               148 drivers/md/raid10.c 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
bio               149 drivers/md/raid10.c 		if (!bio)
bio               151 drivers/md/raid10.c 		r10_bio->devs[j].bio = bio;
bio               154 drivers/md/raid10.c 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
bio               155 drivers/md/raid10.c 		if (!bio)
bio               157 drivers/md/raid10.c 		r10_bio->devs[j].repl_bio = bio;
bio               164 drivers/md/raid10.c 		struct bio *rbio = r10_bio->devs[j].repl_bio;
bio               171 drivers/md/raid10.c 		bio = r10_bio->devs[j].bio;
bio               183 drivers/md/raid10.c 		bio->bi_private = rp;
bio               199 drivers/md/raid10.c 		if (r10_bio->devs[j].bio)
bio               200 drivers/md/raid10.c 			bio_put(r10_bio->devs[j].bio);
bio               218 drivers/md/raid10.c 		struct bio *bio = r10bio->devs[j].bio;
bio               220 drivers/md/raid10.c 		if (bio) {
bio               221 drivers/md/raid10.c 			rp = get_resync_pages(bio);
bio               223 drivers/md/raid10.c 			bio_put(bio);
bio               226 drivers/md/raid10.c 		bio = r10bio->devs[j].repl_bio;
bio               227 drivers/md/raid10.c 		if (bio)
bio               228 drivers/md/raid10.c 			bio_put(bio);
bio               242 drivers/md/raid10.c 		struct bio **bio = &r10_bio->devs[i].bio;
bio               243 drivers/md/raid10.c 		if (!BIO_SPECIAL(*bio))
bio               244 drivers/md/raid10.c 			bio_put(*bio);
bio               245 drivers/md/raid10.c 		*bio = NULL;
bio               246 drivers/md/raid10.c 		bio = &r10_bio->devs[i].repl_bio;
bio               247 drivers/md/raid10.c 		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
bio               248 drivers/md/raid10.c 			bio_put(*bio);
bio               249 drivers/md/raid10.c 		*bio = NULL;
bio               294 drivers/md/raid10.c 	struct bio *bio = r10_bio->master_bio;
bio               298 drivers/md/raid10.c 		bio->bi_status = BLK_STS_IOERR;
bio               300 drivers/md/raid10.c 	bio_endio(bio);
bio               325 drivers/md/raid10.c 			 struct bio *bio, int *slotp, int *replp)
bio               331 drivers/md/raid10.c 		if (r10_bio->devs[slot].bio == bio)
bio               333 drivers/md/raid10.c 		if (r10_bio->devs[slot].repl_bio == bio) {
bio               349 drivers/md/raid10.c static void raid10_end_read_request(struct bio *bio)
bio               351 drivers/md/raid10.c 	int uptodate = !bio->bi_status;
bio               352 drivers/md/raid10.c 	struct r10bio *r10_bio = bio->bi_private;
bio               427 drivers/md/raid10.c static void raid10_end_write_request(struct bio *bio)
bio               429 drivers/md/raid10.c 	struct r10bio *r10_bio = bio->bi_private;
bio               435 drivers/md/raid10.c 	struct bio *to_put = NULL;
bio               438 drivers/md/raid10.c 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
bio               440 drivers/md/raid10.c 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
bio               452 drivers/md/raid10.c 	if (bio->bi_status && !discard_error) {
bio               466 drivers/md/raid10.c 			    (bio->bi_opf & MD_FAILFAST)) {
bio               479 drivers/md/raid10.c 				r10_bio->devs[slot].bio = NULL;
bio               480 drivers/md/raid10.c 				to_put = bio;
bio               514 drivers/md/raid10.c 			bio_put(bio);
bio               518 drivers/md/raid10.c 				r10_bio->devs[slot].bio = IO_MADE_GOOD;
bio               749 drivers/md/raid10.c 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
bio               885 drivers/md/raid10.c 		struct bio *bio;
bio               887 drivers/md/raid10.c 		bio = bio_list_get(&conf->pending_bio_list);
bio               908 drivers/md/raid10.c 		while (bio) { /* submit pending writes */
bio               909 drivers/md/raid10.c 			struct bio *next = bio->bi_next;
bio               910 drivers/md/raid10.c 			struct md_rdev *rdev = (void *)bio->bi_disk;
bio               911 drivers/md/raid10.c 			bio->bi_next = NULL;
bio               912 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bio               914 drivers/md/raid10.c 				bio_io_error(bio);
bio               915 drivers/md/raid10.c 			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
bio               916 drivers/md/raid10.c 					    !blk_queue_discard(bio->bi_disk->queue)))
bio               918 drivers/md/raid10.c 				bio_endio(bio);
bio               920 drivers/md/raid10.c 				generic_make_request(bio);
bio               921 drivers/md/raid10.c 			bio = next;
bio              1075 drivers/md/raid10.c 	struct bio *bio;
bio              1089 drivers/md/raid10.c 	bio = bio_list_get(&plug->pending);
bio              1093 drivers/md/raid10.c 	while (bio) { /* submit pending writes */
bio              1094 drivers/md/raid10.c 		struct bio *next = bio->bi_next;
bio              1095 drivers/md/raid10.c 		struct md_rdev *rdev = (void *)bio->bi_disk;
bio              1096 drivers/md/raid10.c 		bio->bi_next = NULL;
bio              1097 drivers/md/raid10.c 		bio_set_dev(bio, rdev->bdev);
bio              1099 drivers/md/raid10.c 			bio_io_error(bio);
bio              1100 drivers/md/raid10.c 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
bio              1101 drivers/md/raid10.c 				    !blk_queue_discard(bio->bi_disk->queue)))
bio              1103 drivers/md/raid10.c 			bio_endio(bio);
bio              1105 drivers/md/raid10.c 			generic_make_request(bio);
bio              1106 drivers/md/raid10.c 		bio = next;
bio              1118 drivers/md/raid10.c 				 struct bio *bio, sector_t sectors)
bio              1122 drivers/md/raid10.c 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
bio              1123 drivers/md/raid10.c 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
bio              1127 drivers/md/raid10.c 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
bio              1128 drivers/md/raid10.c 			   conf->reshape_progress >= bio->bi_iter.bi_sector +
bio              1134 drivers/md/raid10.c static void raid10_read_request(struct mddev *mddev, struct bio *bio,
bio              1138 drivers/md/raid10.c 	struct bio *read_bio;
bio              1139 drivers/md/raid10.c 	const int op = bio_op(bio);
bio              1140 drivers/md/raid10.c 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
bio              1176 drivers/md/raid10.c 	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
bio              1192 drivers/md/raid10.c 	if (max_sectors < bio_sectors(bio)) {
bio              1193 drivers/md/raid10.c 		struct bio *split = bio_split(bio, max_sectors,
bio              1195 drivers/md/raid10.c 		bio_chain(split, bio);
bio              1197 drivers/md/raid10.c 		generic_make_request(bio);
bio              1199 drivers/md/raid10.c 		bio = split;
bio              1200 drivers/md/raid10.c 		r10_bio->master_bio = bio;
bio              1205 drivers/md/raid10.c 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
bio              1207 drivers/md/raid10.c 	r10_bio->devs[slot].bio = read_bio;
bio              1229 drivers/md/raid10.c 				  struct bio *bio, bool replacement,
bio              1232 drivers/md/raid10.c 	const int op = bio_op(bio);
bio              1233 drivers/md/raid10.c 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
bio              1234 drivers/md/raid10.c 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
bio              1241 drivers/md/raid10.c 	struct bio *mbio;
bio              1253 drivers/md/raid10.c 	mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio              1257 drivers/md/raid10.c 		r10_bio->devs[n_copy].bio = mbio;
bio              1296 drivers/md/raid10.c static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio              1307 drivers/md/raid10.c 					    bio->bi_iter.bi_sector,
bio              1308 drivers/md/raid10.c 					    bio_end_sector(bio)))) {
bio              1314 drivers/md/raid10.c 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
bio              1322 drivers/md/raid10.c 	regular_request_wait(mddev, conf, bio, sectors);
bio              1325 drivers/md/raid10.c 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
bio              1326 drivers/md/raid10.c 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
bio              1327 drivers/md/raid10.c 	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
bio              1328 drivers/md/raid10.c 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
bio              1386 drivers/md/raid10.c 		r10_bio->devs[i].bio = NULL;
bio              1435 drivers/md/raid10.c 			r10_bio->devs[i].bio = bio;
bio              1439 drivers/md/raid10.c 			r10_bio->devs[i].repl_bio = bio;
bio              1451 drivers/md/raid10.c 			if (r10_bio->devs[j].bio) {
bio              1477 drivers/md/raid10.c 	if (r10_bio->sectors < bio_sectors(bio)) {
bio              1478 drivers/md/raid10.c 		struct bio *split = bio_split(bio, r10_bio->sectors,
bio              1480 drivers/md/raid10.c 		bio_chain(split, bio);
bio              1482 drivers/md/raid10.c 		generic_make_request(bio);
bio              1484 drivers/md/raid10.c 		bio = split;
bio              1485 drivers/md/raid10.c 		r10_bio->master_bio = bio;
bio              1492 drivers/md/raid10.c 		if (r10_bio->devs[i].bio)
bio              1493 drivers/md/raid10.c 			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
bio              1495 drivers/md/raid10.c 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
bio              1500 drivers/md/raid10.c static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
bio              1507 drivers/md/raid10.c 	r10_bio->master_bio = bio;
bio              1511 drivers/md/raid10.c 	r10_bio->sector = bio->bi_iter.bi_sector;
bio              1515 drivers/md/raid10.c 	if (bio_data_dir(bio) == READ)
bio              1516 drivers/md/raid10.c 		raid10_read_request(mddev, bio, r10_bio);
bio              1518 drivers/md/raid10.c 		raid10_write_request(mddev, bio, r10_bio);
bio              1521 drivers/md/raid10.c static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
bio              1526 drivers/md/raid10.c 	int sectors = bio_sectors(bio);
bio              1528 drivers/md/raid10.c 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
bio              1529 drivers/md/raid10.c 	    && md_flush_request(mddev, bio))
bio              1532 drivers/md/raid10.c 	if (!md_write_start(mddev, bio))
bio              1539 drivers/md/raid10.c 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
bio              1545 drivers/md/raid10.c 			(bio->bi_iter.bi_sector &
bio              1547 drivers/md/raid10.c 	__make_request(mddev, bio, sectors);
bio              1883 drivers/md/raid10.c static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
bio              1887 drivers/md/raid10.c 	if (!bio->bi_status)
bio              1909 drivers/md/raid10.c static void end_sync_read(struct bio *bio)
bio              1911 drivers/md/raid10.c 	struct r10bio *r10_bio = get_resync_r10bio(bio);
bio              1913 drivers/md/raid10.c 	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
bio              1915 drivers/md/raid10.c 	__end_sync_read(r10_bio, bio, d);
bio              1918 drivers/md/raid10.c static void end_reshape_read(struct bio *bio)
bio              1921 drivers/md/raid10.c 	struct r10bio *r10_bio = bio->bi_private;
bio              1923 drivers/md/raid10.c 	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
bio              1953 drivers/md/raid10.c static void end_sync_write(struct bio *bio)
bio              1955 drivers/md/raid10.c 	struct r10bio *r10_bio = get_resync_r10bio(bio);
bio              1965 drivers/md/raid10.c 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
bio              1971 drivers/md/raid10.c 	if (bio->bi_status) {
bio              2012 drivers/md/raid10.c 	struct bio *tbio, *fbio;
bio              2020 drivers/md/raid10.c 		if (!r10_bio->devs[i].bio->bi_status)
bio              2027 drivers/md/raid10.c 	fbio = r10_bio->devs[i].bio;
bio              2039 drivers/md/raid10.c 		tbio = r10_bio->devs[i].bio;
bio              2049 drivers/md/raid10.c 		if (!r10_bio->devs[i].bio->bi_status) {
bio              2114 drivers/md/raid10.c 		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
bio              2115 drivers/md/raid10.c 		    && r10_bio->devs[i].bio != fbio)
bio              2152 drivers/md/raid10.c 	struct bio *bio = r10_bio->devs[0].bio;
bio              2158 drivers/md/raid10.c 	struct page **pages = get_resync_pages(bio)->pages;
bio              2228 drivers/md/raid10.c 	struct bio *wbio, *wbio2;
bio              2241 drivers/md/raid10.c 	wbio = r10_bio->devs[1].bio;
bio              2360 drivers/md/raid10.c 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
bio              2418 drivers/md/raid10.c 				r10_bio->devs[r10_bio->read_slot].bio
bio              2517 drivers/md/raid10.c 	struct bio *bio = r10_bio->master_bio;
bio              2549 drivers/md/raid10.c 		struct bio *wbio;
bio              2554 drivers/md/raid10.c 		wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio              2555 drivers/md/raid10.c 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
bio              2579 drivers/md/raid10.c 	struct bio *bio;
bio              2591 drivers/md/raid10.c 	bio = r10_bio->devs[slot].bio;
bio              2592 drivers/md/raid10.c 	bio_put(bio);
bio              2593 drivers/md/raid10.c 	r10_bio->devs[slot].bio = NULL;
bio              2596 drivers/md/raid10.c 		r10_bio->devs[slot].bio = IO_BLOCKED;
bio              2626 drivers/md/raid10.c 			if (r10_bio->devs[m].bio == NULL ||
bio              2627 drivers/md/raid10.c 				r10_bio->devs[m].bio->bi_end_io == NULL)
bio              2629 drivers/md/raid10.c 			if (!r10_bio->devs[m].bio->bi_status) {
bio              2664 drivers/md/raid10.c 			struct bio *bio = r10_bio->devs[m].bio;
bio              2666 drivers/md/raid10.c 			if (bio == IO_MADE_GOOD) {
bio              2672 drivers/md/raid10.c 			} else if (bio != NULL && bio->bi_status) {
bio              2681 drivers/md/raid10.c 			bio = r10_bio->devs[m].repl_bio;
bio              2683 drivers/md/raid10.c 			if (rdev && bio == IO_MADE_GOOD) {
bio              2807 drivers/md/raid10.c 	struct bio *bio;
bio              2818 drivers/md/raid10.c 		bio = r10bio->devs[i].bio;
bio              2819 drivers/md/raid10.c 		rp = bio->bi_private;
bio              2820 drivers/md/raid10.c 		bio_reset(bio);
bio              2821 drivers/md/raid10.c 		bio->bi_private = rp;
bio              2822 drivers/md/raid10.c 		bio = r10bio->devs[i].repl_bio;
bio              2823 drivers/md/raid10.c 		if (bio) {
bio              2824 drivers/md/raid10.c 			rp = bio->bi_private;
bio              2825 drivers/md/raid10.c 			bio_reset(bio);
bio              2826 drivers/md/raid10.c 			bio->bi_private = rp;
bio              2906 drivers/md/raid10.c 	struct bio *biolist = NULL, *bio;
bio              3112 drivers/md/raid10.c 			r10_bio->master_bio = (struct bio *)rb2;
bio              3165 drivers/md/raid10.c 				bio = r10_bio->devs[0].bio;
bio              3166 drivers/md/raid10.c 				bio->bi_next = biolist;
bio              3167 drivers/md/raid10.c 				biolist = bio;
bio              3168 drivers/md/raid10.c 				bio->bi_end_io = end_sync_read;
bio              3169 drivers/md/raid10.c 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio              3171 drivers/md/raid10.c 					bio->bi_opf |= MD_FAILFAST;
bio              3173 drivers/md/raid10.c 				bio->bi_iter.bi_sector = from_addr +
bio              3175 drivers/md/raid10.c 				bio_set_dev(bio, rdev->bdev);
bio              3190 drivers/md/raid10.c 					bio = r10_bio->devs[1].bio;
bio              3191 drivers/md/raid10.c 					bio->bi_next = biolist;
bio              3192 drivers/md/raid10.c 					biolist = bio;
bio              3193 drivers/md/raid10.c 					bio->bi_end_io = end_sync_write;
bio              3194 drivers/md/raid10.c 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio              3195 drivers/md/raid10.c 					bio->bi_iter.bi_sector = to_addr
bio              3197 drivers/md/raid10.c 					bio_set_dev(bio, mrdev->bdev);
bio              3200 drivers/md/raid10.c 					r10_bio->devs[1].bio->bi_end_io = NULL;
bio              3203 drivers/md/raid10.c 				bio = r10_bio->devs[1].repl_bio;
bio              3204 drivers/md/raid10.c 				if (bio)
bio              3205 drivers/md/raid10.c 					bio->bi_end_io = NULL;
bio              3212 drivers/md/raid10.c 				bio->bi_next = biolist;
bio              3213 drivers/md/raid10.c 				biolist = bio;
bio              3214 drivers/md/raid10.c 				bio->bi_end_io = end_sync_write;
bio              3215 drivers/md/raid10.c 				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio              3216 drivers/md/raid10.c 				bio->bi_iter.bi_sector = to_addr +
bio              3218 drivers/md/raid10.c 				bio_set_dev(bio, mreplace->bdev);
bio              3268 drivers/md/raid10.c 			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
bio              3282 drivers/md/raid10.c 					r10_bio->devs[0].bio->bi_opf
bio              3343 drivers/md/raid10.c 			bio = r10_bio->devs[i].bio;
bio              3344 drivers/md/raid10.c 			bio->bi_status = BLK_STS_IOERR;
bio              3366 drivers/md/raid10.c 			bio->bi_next = biolist;
bio              3367 drivers/md/raid10.c 			biolist = bio;
bio              3368 drivers/md/raid10.c 			bio->bi_end_io = end_sync_read;
bio              3369 drivers/md/raid10.c 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio              3371 drivers/md/raid10.c 				bio->bi_opf |= MD_FAILFAST;
bio              3372 drivers/md/raid10.c 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio              3373 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bio              3384 drivers/md/raid10.c 			bio = r10_bio->devs[i].repl_bio;
bio              3385 drivers/md/raid10.c 			bio->bi_status = BLK_STS_IOERR;
bio              3388 drivers/md/raid10.c 			bio->bi_next = biolist;
bio              3389 drivers/md/raid10.c 			biolist = bio;
bio              3390 drivers/md/raid10.c 			bio->bi_end_io = end_sync_write;
bio              3391 drivers/md/raid10.c 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio              3393 drivers/md/raid10.c 				bio->bi_opf |= MD_FAILFAST;
bio              3394 drivers/md/raid10.c 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio              3395 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bio              3403 drivers/md/raid10.c 				if (r10_bio->devs[i].bio->bi_end_io)
bio              3428 drivers/md/raid10.c 		for (bio = biolist; bio; bio = bio->bi_next) {
bio              3429 drivers/md/raid10.c 			struct resync_pages *rp = get_resync_pages(bio);
bio              3435 drivers/md/raid10.c 			bio_add_page(bio, page, len, 0);
bio              3489 drivers/md/raid10.c 		bio = biolist;
bio              3492 drivers/md/raid10.c 		bio->bi_next = NULL;
bio              3493 drivers/md/raid10.c 		r10_bio = get_resync_r10bio(bio);
bio              3496 drivers/md/raid10.c 		if (bio->bi_end_io == end_sync_read) {
bio              3497 drivers/md/raid10.c 			md_sync_acct_bio(bio, nr_sectors);
bio              3498 drivers/md/raid10.c 			bio->bi_status = 0;
bio              3499 drivers/md/raid10.c 			generic_make_request(bio);
bio              4441 drivers/md/raid10.c 	struct bio *blist;
bio              4442 drivers/md/raid10.c 	struct bio *bio, *read_bio;
bio              4609 drivers/md/raid10.c 		struct bio *b;
bio              4617 drivers/md/raid10.c 			b = r10_bio->devs[s/2].bio;
bio              4634 drivers/md/raid10.c 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
bio              4640 drivers/md/raid10.c 		for (bio = blist; bio ; bio = bio->bi_next) {
bio              4645 drivers/md/raid10.c 			bio_add_page(bio, page, len, 0);
bio              4700 drivers/md/raid10.c 		struct bio *b;
bio              4709 drivers/md/raid10.c 			b = r10_bio->devs[s/2].bio;
bio              4782 drivers/md/raid10.c 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
bio              4839 drivers/md/raid10.c static void end_reshape_write(struct bio *bio)
bio              4841 drivers/md/raid10.c 	struct r10bio *r10_bio = get_resync_r10bio(bio);
bio              4849 drivers/md/raid10.c 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
bio              4857 drivers/md/raid10.c 	if (bio->bi_status) {
bio               131 drivers/md/raid10.h 	struct bio		*master_bio;
bio               147 drivers/md/raid10.h 		struct bio	*bio;
bio               149 drivers/md/raid10.h 			struct bio	*repl_bio; /* used for resync and
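
One pattern worth pulling out of the raid1 and raid10 excerpts above: both recycle their preallocated resync bios with bio_reset(), which clears every field including bi_private, so the resync_pages pointer is saved and restored around the call. Sketch:

	#include <linux/bio.h>

	/* illustrative: the save/restore around bio_reset() seen above */
	static void example_recycle(struct bio *bio)
	{
		struct resync_pages *rp = bio->bi_private;

		bio_reset(bio);		/* wipes bi_private, bi_end_io, flags, iter */
		bio->bi_private = rp;	/* reattach the page bookkeeping */
	}
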
bio               114 drivers/md/raid5-cache.c 	struct bio flush_bio;
bio               217 drivers/md/raid5-cache.c 	struct bio *current_bio;/* current_bio accepting new data */
bio               228 drivers/md/raid5-cache.c 	struct bio *split_bio;
bio               296 drivers/md/raid5-cache.c 	struct bio *wbi, *wbi2;
bio               560 drivers/md/raid5-cache.c static void r5l_log_endio(struct bio *bio)
bio               562 drivers/md/raid5-cache.c 	struct r5l_io_unit *io = bio->bi_private;
bio               569 drivers/md/raid5-cache.c 	if (bio->bi_status)
bio               572 drivers/md/raid5-cache.c 	bio_put(bio);
bio               609 drivers/md/raid5-cache.c 		struct bio *bi;
bio               738 drivers/md/raid5-cache.c static struct bio *r5l_bio_alloc(struct r5l_log *log)
bio               740 drivers/md/raid5-cache.c 	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);
bio               742 drivers/md/raid5-cache.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               743 drivers/md/raid5-cache.c 	bio_set_dev(bio, log->rdev->bdev);
bio               744 drivers/md/raid5-cache.c 	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
bio               746 drivers/md/raid5-cache.c 	return bio;
bio              1101 drivers/md/raid5-cache.c int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio              1111 drivers/md/raid5-cache.c 		if (bio->bi_iter.bi_size == 0) {
bio              1112 drivers/md/raid5-cache.c 			bio_endio(bio);
bio              1115 drivers/md/raid5-cache.c 		bio->bi_opf &= ~REQ_PREFLUSH;
bio              1118 drivers/md/raid5-cache.c 		if (bio->bi_iter.bi_size == 0) {
bio              1121 drivers/md/raid5-cache.c 			bio_list_add(&log->current_io->flush_barriers, bio);
bio              1257 drivers/md/raid5-cache.c static void r5l_log_flush_endio(struct bio *bio)
bio              1259 drivers/md/raid5-cache.c 	struct r5l_log *log = container_of(bio, struct r5l_log,
bio              1264 drivers/md/raid5-cache.c 	if (bio->bi_status)
bio              1631 drivers/md/raid5-cache.c 	struct bio *ra_bio;	/* bio to do the read ahead */
bio                11 drivers/md/raid5-log.h extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
bio                47 drivers/md/raid5-log.h extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
bio               107 drivers/md/raid5-log.h static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
bio               112 drivers/md/raid5-log.h 		ret = r5l_handle_flush_request(conf->log, bio);
bio               114 drivers/md/raid5-log.h 		ret = ppl_handle_flush_request(conf->log, bio);
bio               153 drivers/md/raid5-ppl.c 	struct bio bio;
bio               253 drivers/md/raid5-ppl.c 	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
bio               397 drivers/md/raid5-ppl.c static void ppl_log_endio(struct bio *bio)
bio               399 drivers/md/raid5-ppl.c 	struct ppl_io_unit *io = bio->bi_private;
bio               406 drivers/md/raid5-ppl.c 	if (bio->bi_status)
bio               417 drivers/md/raid5-ppl.c static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
bio               422 drivers/md/raid5-ppl.c 		 __func__, io->seq, bio->bi_iter.bi_size,
bio               423 drivers/md/raid5-ppl.c 		 (unsigned long long)bio->bi_iter.bi_sector,
bio               424 drivers/md/raid5-ppl.c 		 bio_devname(bio, b));
bio               426 drivers/md/raid5-ppl.c 	submit_bio(bio);
bio               434 drivers/md/raid5-ppl.c 	struct bio *bio = &io->bio;
bio               438 drivers/md/raid5-ppl.c 	bio->bi_private = io;
bio               441 drivers/md/raid5-ppl.c 		ppl_log_endio(bio);
bio               467 drivers/md/raid5-ppl.c 	bio->bi_end_io = ppl_log_endio;
bio               468 drivers/md/raid5-ppl.c 	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
bio               469 drivers/md/raid5-ppl.c 	bio_set_dev(bio, log->rdev->bdev);
bio               470 drivers/md/raid5-ppl.c 	bio->bi_iter.bi_sector = log->next_io_sector;
bio               471 drivers/md/raid5-ppl.c 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
bio               472 drivers/md/raid5-ppl.c 	bio->bi_write_hint = ppl_conf->write_hint;
bio               496 drivers/md/raid5-ppl.c 		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
bio               497 drivers/md/raid5-ppl.c 			struct bio *prev = bio;
bio               499 drivers/md/raid5-ppl.c 			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
bio               501 drivers/md/raid5-ppl.c 			bio->bi_opf = prev->bi_opf;
bio               502 drivers/md/raid5-ppl.c 			bio->bi_write_hint = prev->bi_write_hint;
bio               503 drivers/md/raid5-ppl.c 			bio_copy_dev(bio, prev);
bio               504 drivers/md/raid5-ppl.c 			bio->bi_iter.bi_sector = bio_end_sector(prev);
bio               505 drivers/md/raid5-ppl.c 			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
bio               507 drivers/md/raid5-ppl.c 			bio_chain(bio, prev);
bio               512 drivers/md/raid5-ppl.c 	ppl_submit_iounit_bio(io, bio);
bio               587 drivers/md/raid5-ppl.c static void ppl_flush_endio(struct bio *bio)
bio               589 drivers/md/raid5-ppl.c 	struct ppl_io_unit *io = bio->bi_private;
bio               595 drivers/md/raid5-ppl.c 	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
bio               597 drivers/md/raid5-ppl.c 	if (bio->bi_status) {
bio               601 drivers/md/raid5-ppl.c 		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
bio               607 drivers/md/raid5-ppl.c 	bio_put(bio);
bio               637 drivers/md/raid5-ppl.c 			struct bio *bio;
bio               640 drivers/md/raid5-ppl.c 			bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
bio               641 drivers/md/raid5-ppl.c 			bio_set_dev(bio, bdev);
bio               642 drivers/md/raid5-ppl.c 			bio->bi_private = io;
bio               643 drivers/md/raid5-ppl.c 			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio               644 drivers/md/raid5-ppl.c 			bio->bi_end_io = ppl_flush_endio;
bio               647 drivers/md/raid5-ppl.c 				 bio_devname(bio, b));
bio               649 drivers/md/raid5-ppl.c 			submit_bio(bio);
bio               691 drivers/md/raid5-ppl.c int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio               693 drivers/md/raid5-ppl.c 	if (bio->bi_iter.bi_size == 0) {
bio               694 drivers/md/raid5-ppl.c 		bio_endio(bio);
bio               697 drivers/md/raid5-ppl.c 	bio->bi_opf &= ~REQ_PREFLUSH;
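
ppl_submit_iounit() above also demonstrates the standard overflow idiom (the nvmet bdev backend further down uses the same one): when bio_add_page() fails because the bio is full, allocate a successor from the same bio_set, copy the op and device, start it at bio_end_sector() of its predecessor, and bio_chain() the two so the first completes only after the second. A condensed sketch, assuming the pre-5.18 bio_alloc_bioset() signature:

	#include <linux/bio.h>

	/* illustrative single step of the chained-bio build-up above */
	static struct bio *example_extend(struct bio *bio, struct page *page,
					  struct bio_set *bs)
	{
		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, bs);
			bio->bi_opf = prev->bi_opf;
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_add_page(bio, page, PAGE_SIZE, 0);
			bio_chain(bio, prev);	/* prev's completion waits on bio */
			submit_bio(prev);
		}
		return bio;
	}
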
bio               873 drivers/md/raid5.c 	struct bio *bio;
bio               875 drivers/md/raid5.c 	while ((bio = bio_list_pop(tmp)))
bio               876 drivers/md/raid5.c 		generic_make_request(bio);
bio               975 drivers/md/raid5.c raid5_end_read_request(struct bio *bi);
bio               977 drivers/md/raid5.c raid5_end_write_request(struct bio *bi);
bio               997 drivers/md/raid5.c 		struct bio *bi, *rbi;
bio              1228 drivers/md/raid5.c async_copy_data(int frombio, struct bio *bio, struct page **page,
bio              1239 drivers/md/raid5.c 	if (bio->bi_iter.bi_sector >= sector)
bio              1240 drivers/md/raid5.c 		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
bio              1242 drivers/md/raid5.c 		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
bio              1248 drivers/md/raid5.c 	bio_for_each_segment(bvl, bio, iter) {
bio              1309 drivers/md/raid5.c 			struct bio *rbi, *rbi2;
bio              1341 drivers/md/raid5.c 			struct bio *rbi;
bio              1732 drivers/md/raid5.c 		struct bio *chosen;
bio              1736 drivers/md/raid5.c 			struct bio *wbi;
bio              2461 drivers/md/raid5.c static void raid5_end_read_request(struct bio *bi)
bio              2592 drivers/md/raid5.c static void raid5_end_write_request(struct bio *bi)
bio              3205 drivers/md/raid5.c static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
bio              3208 drivers/md/raid5.c 	struct bio **bip;
bio              3355 drivers/md/raid5.c 		struct bio *bi;
bio              3393 drivers/md/raid5.c 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
bio              3414 drivers/md/raid5.c 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
bio              3438 drivers/md/raid5.c 				struct bio *nextbi =
bio              3767 drivers/md/raid5.c 				struct bio *wbi, *wbi2;
bio              5117 drivers/md/raid5.c static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
bio              5120 drivers/md/raid5.c 	sector_t sector = bio->bi_iter.bi_sector;
bio              5122 drivers/md/raid5.c 	unsigned int bio_sectors = bio_sectors(bio);
bio              5124 drivers/md/raid5.c 	WARN_ON_ONCE(bio->bi_partno);
bio              5135 drivers/md/raid5.c static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
bio              5148 drivers/md/raid5.c static struct bio *remove_bio_from_retry(struct r5conf *conf,
bio              5151 drivers/md/raid5.c 	struct bio *bi;
bio              5175 drivers/md/raid5.c static void raid5_align_endio(struct bio *bi)
bio              5177 drivers/md/raid5.c 	struct bio *raid_bi = bi->bi_private;
bio              5204 drivers/md/raid5.c static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
bio              5208 drivers/md/raid5.c 	struct bio *align_bi;
bio              5294 drivers/md/raid5.c static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
bio              5296 drivers/md/raid5.c 	struct bio *split;
bio              5490 drivers/md/raid5.c static void make_discard_request(struct mddev *mddev, struct bio *bi)
bio              5577 drivers/md/raid5.c static bool raid5_make_request(struct mddev *mddev, struct bio *bi)
bio              6115 drivers/md/raid5.c static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
bio              6295 drivers/md/raid5.c 		struct bio *bio;
bio              6315 drivers/md/raid5.c 		while ((bio = remove_bio_from_retry(conf, &offset))) {
bio              6318 drivers/md/raid5.c 			ok = retry_aligned_read(conf, bio, offset);
bio              7081 drivers/md/raid5.c 		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
bio               253 drivers/md/raid5.h 		struct bio	req, rreq;
bio               256 drivers/md/raid5.h 		struct bio	*toread, *read, *towrite, *written;
bio               493 drivers/md/raid5.h static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
bio               495 drivers/md/raid5.h 	if (bio_end_sector(bio) < sector + STRIPE_SECTORS)
bio               496 drivers/md/raid5.h 		return bio->bi_next;
bio               607 drivers/md/raid5.h 	struct bio		*retry_read_aligned; /* currently retrying aligned bios   */
bio               609 drivers/md/raid5.h 	struct bio		*retry_read_aligned_list; /* aligned bios retry list  */
bio                91 drivers/mtd/mtd_blkdevs.c 		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
bio                94 drivers/mtd/mtd_blkdevs.c 				kunmap(bio_page(req->bio));
bio                98 drivers/mtd/mtd_blkdevs.c 		kunmap(bio_page(req->bio));
bio               106 drivers/mtd/mtd_blkdevs.c 		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
bio               109 drivers/mtd/mtd_blkdevs.c 				kunmap(bio_page(req->bio));
bio               113 drivers/mtd/mtd_blkdevs.c 		kunmap(bio_page(req->bio));
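
The mtd_blkdevs excerpt reaches request data at the lowest level: kmap() the first page of req->bio and add bio_offset(). That is only safe because these requests are built with a single contiguous segment. Sketch (hypothetical helper; error paths elided):

	#include <linux/blkdev.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	/* illustrative: single-segment access as in mtd_blkdevs.c above */
	static void example_copy_out(struct request *req, void *dst, size_t len)
	{
		void *buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);

		memcpy(dst, buf, len);
		kunmap(bio_page(req->bio));
	}
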
bio               165 drivers/nvdimm/blk.c static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
bio               175 drivers/nvdimm/blk.c 	if (!bio_integrity_prep(bio))
bio               178 drivers/nvdimm/blk.c 	bip = bio_integrity(bio);
bio               180 drivers/nvdimm/blk.c 	rw = bio_data_dir(bio);
bio               181 drivers/nvdimm/blk.c 	do_acct = nd_iostat_start(bio, &start);
bio               182 drivers/nvdimm/blk.c 	bio_for_each_segment(bvec, bio, iter) {
bio               193 drivers/nvdimm/blk.c 			bio->bi_status = errno_to_blk_status(err);
bio               198 drivers/nvdimm/blk.c 		nd_iostat_end(bio, start);
bio               200 drivers/nvdimm/blk.c 	bio_endio(bio);
bio              1442 drivers/nvdimm/btt.c static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
bio              1444 drivers/nvdimm/btt.c 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio              1452 drivers/nvdimm/btt.c 	if (!bio_integrity_prep(bio))
bio              1455 drivers/nvdimm/btt.c 	do_acct = nd_iostat_start(bio, &start);
bio              1456 drivers/nvdimm/btt.c 	bio_for_each_segment(bvec, bio, iter) {
bio              1463 drivers/nvdimm/btt.c 			bio->bi_status = BLK_STS_IOERR;
bio              1468 drivers/nvdimm/btt.c 				  bio_op(bio), iter.bi_sector);
bio              1472 drivers/nvdimm/btt.c 					(op_is_write(bio_op(bio))) ? "WRITE" :
bio              1475 drivers/nvdimm/btt.c 			bio->bi_status = errno_to_blk_status(err);
bio              1480 drivers/nvdimm/btt.c 		nd_iostat_end(bio, start);
bio              1482 drivers/nvdimm/btt.c 	bio_endio(bio);
bio               158 drivers/nvdimm/nd.h 	int (*flush)(struct nd_region *nd_region, struct bio *bio);
bio               399 drivers/nvdimm/nd.h void __nd_iostat_start(struct bio *bio, unsigned long *start);
bio               400 drivers/nvdimm/nd.h static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
bio               402 drivers/nvdimm/nd.h 	struct gendisk *disk = bio->bi_disk;
bio               408 drivers/nvdimm/nd.h 	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
bio               412 drivers/nvdimm/nd.h static inline void nd_iostat_end(struct bio *bio, unsigned long start)
bio               414 drivers/nvdimm/nd.h 	struct gendisk *disk = bio->bi_disk;
bio               416 drivers/nvdimm/nd.h 	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
bio               101 drivers/nvdimm/nd_virtio.c int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
bio               107 drivers/nvdimm/nd_virtio.c 	if (bio && bio->bi_iter.bi_sector != -1) {
bio               108 drivers/nvdimm/nd_virtio.c 		struct bio *child = bio_alloc(GFP_ATOMIC, 0);
bio               112 drivers/nvdimm/nd_virtio.c 		bio_copy_dev(child, bio);
bio               115 drivers/nvdimm/nd_virtio.c 		bio_chain(child, bio);
bio               185 drivers/nvdimm/pmem.c static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
bio               196 drivers/nvdimm/pmem.c 	if (bio->bi_opf & REQ_PREFLUSH)
bio               197 drivers/nvdimm/pmem.c 		ret = nvdimm_flush(nd_region, bio);
bio               199 drivers/nvdimm/pmem.c 	do_acct = nd_iostat_start(bio, &start);
bio               200 drivers/nvdimm/pmem.c 	bio_for_each_segment(bvec, bio, iter) {
bio               202 drivers/nvdimm/pmem.c 				bvec.bv_offset, bio_op(bio), iter.bi_sector);
bio               204 drivers/nvdimm/pmem.c 			bio->bi_status = rc;
bio               209 drivers/nvdimm/pmem.c 		nd_iostat_end(bio, start);
bio               211 drivers/nvdimm/pmem.c 	if (bio->bi_opf & REQ_FUA)
bio               212 drivers/nvdimm/pmem.c 		ret = nvdimm_flush(nd_region, bio);
bio               215 drivers/nvdimm/pmem.c 		bio->bi_status = errno_to_blk_status(ret);
bio               217 drivers/nvdimm/pmem.c 	bio_endio(bio);
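
nd_blk, btt and pmem share one bio-driver skeleton: optional flush, a per-segment transfer via bio_for_each_segment(), the first error latched into bi_status, accounting bracketed by nd_iostat_start()/nd_iostat_end(), then bio_endio(). A condensed sketch; example_rw() is a stand-in for the device-specific transfer, not a real helper:

	#include <linux/bio.h>
	#include <linux/blk_types.h>

	/* hypothetical device transfer; returns 0 or -errno */
	static int example_rw(struct page *page, unsigned int len,
			      unsigned int off, unsigned int op, sector_t sector);

	static blk_qc_t example_make_request(struct request_queue *q,
					     struct bio *bio)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;
		int err;

		bio_for_each_segment(bvec, bio, iter) {
			err = example_rw(bvec.bv_page, bvec.bv_len,
					 bvec.bv_offset, bio_op(bio),
					 iter.bi_sector);
			if (err) {
				bio->bi_status = errno_to_blk_status(err);
				break;
			}
		}
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}
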
bio              1082 drivers/nvdimm/region_devs.c int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
bio              1089 drivers/nvdimm/region_devs.c 		if (nd_region->flush(nd_region, bio))
bio                54 drivers/nvdimm/virtio_pmem.h int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
bio               613 drivers/nvme/host/core.c 	struct bio *bio;
bio               635 drivers/nvme/host/core.c 	__rq_for_each_bio(bio, req) {
bio               636 drivers/nvme/host/core.c 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
bio               637 drivers/nvme/host/core.c 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
bio               862 drivers/nvme/host/core.c static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
bio               877 drivers/nvme/host/core.c 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
bio               885 drivers/nvme/host/core.c 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
bio               905 drivers/nvme/host/core.c 	struct bio *bio = NULL;
bio               921 drivers/nvme/host/core.c 		bio = req->bio;
bio               922 drivers/nvme/host/core.c 		bio->bi_disk = disk;
bio               924 drivers/nvme/host/core.c 			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
bio               947 drivers/nvme/host/core.c 	if (bio)
bio               948 drivers/nvme/host/core.c 		blk_rq_unmap_user(bio);
bio               662 drivers/nvme/host/lightnvm.c 	if (rqd->bio)
bio               663 drivers/nvme/host/lightnvm.c 		blk_rq_append_bio(rq, &rqd->bio);
bio               762 drivers/nvme/host/lightnvm.c 	struct bio *bio = NULL;
bio               799 drivers/nvme/host/lightnvm.c 		bio = rq->bio;
bio               820 drivers/nvme/host/lightnvm.c 		bio->bi_disk = disk;
bio               842 drivers/nvme/host/lightnvm.c 	if (bio)
bio               843 drivers/nvme/host/lightnvm.c 		blk_rq_unmap_user(bio);
bio               297 drivers/nvme/host/multipath.c 		struct bio *bio)
bio               311 drivers/nvme/host/multipath.c 	blk_queue_split(q, &bio);
bio               316 drivers/nvme/host/multipath.c 		bio->bi_disk = ns->disk;
bio               317 drivers/nvme/host/multipath.c 		bio->bi_opf |= REQ_NVME_MPATH;
bio               318 drivers/nvme/host/multipath.c 		trace_block_bio_remap(bio->bi_disk->queue, bio,
bio               320 drivers/nvme/host/multipath.c 				      bio->bi_iter.bi_sector);
bio               321 drivers/nvme/host/multipath.c 		ret = direct_make_request(bio);
bio               326 drivers/nvme/host/multipath.c 		bio_list_add(&head->requeue_list, bio);
bio               331 drivers/nvme/host/multipath.c 		bio->bi_status = BLK_STS_IOERR;
bio               332 drivers/nvme/host/multipath.c 		bio_endio(bio);
bio               343 drivers/nvme/host/multipath.c 	struct bio *bio, *next;
bio               349 drivers/nvme/host/multipath.c 	while ((bio = next) != NULL) {
bio               350 drivers/nvme/host/multipath.c 		next = bio->bi_next;
bio               351 drivers/nvme/host/multipath.c 		bio->bi_next = NULL;
bio               357 drivers/nvme/host/multipath.c 		bio->bi_disk = head->disk;
bio               358 drivers/nvme/host/multipath.c 		generic_make_request(bio);
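
nvme_requeue_work() drains the requeue list exactly as excerpted: unlink each bio (clear bi_next), point bi_disk back at the multipath node so path selection runs again, and resubmit. A sketch of that drain loop:

	#include <linux/bio.h>

	/* illustrative drain of a singly linked bio list, as above */
	static void example_requeue(struct bio *list, struct gendisk *disk)
	{
		struct bio *bio = list, *next;

		while (bio) {
			next = bio->bi_next;
			bio->bi_next = NULL;	/* detach before resubmit */
			bio->bi_disk = disk;	/* re-enter via the mpath node */
			generic_make_request(bio);
			bio = next;
		}
	}
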
bio               551 drivers/nvme/host/nvme.h 					 req->bio, status);
bio                41 drivers/nvme/host/tcp.c 	struct bio		*curr_bio;
bio               225 drivers/nvme/host/tcp.c 		struct bio *bio = req->curr_bio;
bio               227 drivers/nvme/host/tcp.c 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
bio               228 drivers/nvme/host/tcp.c 		nsegs = bio_segments(bio);
bio               229 drivers/nvme/host/tcp.c 		size = bio->bi_iter.bi_size;
bio               230 drivers/nvme/host/tcp.c 		offset = bio->bi_iter.bi_bvec_done;
bio              2122 drivers/nvme/host/tcp.c 	req->curr_bio = rq->bio;
bio               136 drivers/nvme/target/io-cmd-bdev.c static void nvmet_bio_done(struct bio *bio)
bio               138 drivers/nvme/target/io-cmd-bdev.c 	struct nvmet_req *req = bio->bi_private;
bio               140 drivers/nvme/target/io-cmd-bdev.c 	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
bio               141 drivers/nvme/target/io-cmd-bdev.c 	if (bio != &req->b.inline_bio)
bio               142 drivers/nvme/target/io-cmd-bdev.c 		bio_put(bio);
bio               148 drivers/nvme/target/io-cmd-bdev.c 	struct bio *bio;
bio               174 drivers/nvme/target/io-cmd-bdev.c 		bio = &req->b.inline_bio;
bio               175 drivers/nvme/target/io-cmd-bdev.c 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio               177 drivers/nvme/target/io-cmd-bdev.c 		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio               179 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
bio               180 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_iter.bi_sector = sector;
bio               181 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_private = req;
bio               182 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_end_io = nvmet_bio_done;
bio               183 drivers/nvme/target/io-cmd-bdev.c 	bio_set_op_attrs(bio, op, op_flags);
bio               186 drivers/nvme/target/io-cmd-bdev.c 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
bio               188 drivers/nvme/target/io-cmd-bdev.c 			struct bio *prev = bio;
bio               190 drivers/nvme/target/io-cmd-bdev.c 			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio               191 drivers/nvme/target/io-cmd-bdev.c 			bio_set_dev(bio, req->ns->bdev);
bio               192 drivers/nvme/target/io-cmd-bdev.c 			bio->bi_iter.bi_sector = sector;
bio               193 drivers/nvme/target/io-cmd-bdev.c 			bio_set_op_attrs(bio, op, op_flags);
bio               195 drivers/nvme/target/io-cmd-bdev.c 			bio_chain(bio, prev);
bio               203 drivers/nvme/target/io-cmd-bdev.c 	submit_bio(bio);
bio               208 drivers/nvme/target/io-cmd-bdev.c 	struct bio *bio = &req->b.inline_bio;
bio               210 drivers/nvme/target/io-cmd-bdev.c 	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio               211 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
bio               212 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_private = req;
bio               213 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_end_io = nvmet_bio_done;
bio               214 drivers/nvme/target/io-cmd-bdev.c 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio               216 drivers/nvme/target/io-cmd-bdev.c 	submit_bio(bio);
bio               227 drivers/nvme/target/io-cmd-bdev.c 		struct nvme_dsm_range *range, struct bio **bio)
bio               235 drivers/nvme/target/io-cmd-bdev.c 			GFP_KERNEL, 0, bio);
bio               246 drivers/nvme/target/io-cmd-bdev.c 	struct bio *bio = NULL;
bio               256 drivers/nvme/target/io-cmd-bdev.c 		status = nvmet_bdev_discard_range(req, &range, &bio);
bio               261 drivers/nvme/target/io-cmd-bdev.c 	if (bio) {
bio               262 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_private = req;
bio               263 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_end_io = nvmet_bio_done;
bio               265 drivers/nvme/target/io-cmd-bdev.c 			bio->bi_status = BLK_STS_IOERR;
bio               266 drivers/nvme/target/io-cmd-bdev.c 			bio_endio(bio);
bio               268 drivers/nvme/target/io-cmd-bdev.c 			submit_bio(bio);
bio               293 drivers/nvme/target/io-cmd-bdev.c 	struct bio *bio = NULL;
bio               304 drivers/nvme/target/io-cmd-bdev.c 			GFP_KERNEL, &bio, 0);
bio               305 drivers/nvme/target/io-cmd-bdev.c 	if (bio) {
bio               306 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_private = req;
bio               307 drivers/nvme/target/io-cmd-bdev.c 		bio->bi_end_io = nvmet_bio_done;
bio               308 drivers/nvme/target/io-cmd-bdev.c 		submit_bio(bio);
bio               297 drivers/nvme/target/nvmet.h 			struct bio      inline_bio;
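The nvmet bdev read/write path above demonstrates the standard chaining loop: when bio_add_page() refuses a page, a fresh bio is allocated, bio_chain() makes the previous bio's completion wait on the new one, and the full predecessor is submitted immediately. Only the head bio carries the real ->bi_end_io. An illustrative sketch of the loop (submit_sg_chain() and its parameters are assumptions, not nvmet code):

#include <linux/bio.h>
#include <linux/scatterlist.h>

static void submit_sg_chain(struct block_device *bdev, sector_t sector,
			    struct scatterlist *sgl, int sg_cnt,
			    bio_end_io_t *done, void *private)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = private;
	bio->bi_end_io = done;		/* only the head bio gets the callback */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	for_each_sg(sgl, sg, sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length,
				    sg->offset) != sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bio_chain(bio, prev);	/* prev's end_io waits on bio */
			submit_bio(prev);
		}
		sector += sg->length >> 9;
		sg_cnt--;
	}
	submit_bio(bio);
}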
bio                61 drivers/s390/block/dasd_diag.c 	struct dasd_diag_bio bio[0];
bio               186 drivers/s390/block/dasd_diag.c 	private->iob.bio_list = dreq->bio;
bio               322 drivers/s390/block/dasd_diag.c 	struct dasd_diag_bio bio;
bio               403 drivers/s390/block/dasd_diag.c 		memset(&bio, 0, sizeof (struct dasd_diag_bio));
bio               404 drivers/s390/block/dasd_diag.c 		bio.type = MDSK_READ_REQ;
bio               405 drivers/s390/block/dasd_diag.c 		bio.block_number = private->pt_block + 1;
bio               406 drivers/s390/block/dasd_diag.c 		bio.buffer = label;
bio               413 drivers/s390/block/dasd_diag.c 		private->iob.bio_list = &bio;
bio               546 drivers/s390/block/dasd_diag.c 	dbio = dreq->bio;
bio                35 drivers/s390/block/dcssblk.c 						struct bio *bio);
bio               855 drivers/s390/block/dcssblk.c dcssblk_make_request(struct request_queue *q, struct bio *bio)
bio               865 drivers/s390/block/dcssblk.c 	blk_queue_split(q, &bio);
bio               868 drivers/s390/block/dcssblk.c 	dev_info = bio->bi_disk->private_data;
bio               871 drivers/s390/block/dcssblk.c 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
bio               872 drivers/s390/block/dcssblk.c 	    (bio->bi_iter.bi_size & 4095) != 0)
bio               875 drivers/s390/block/dcssblk.c 	if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
bio               886 drivers/s390/block/dcssblk.c 			if (bio_data_dir(bio) == WRITE) {
bio               894 drivers/s390/block/dcssblk.c 	index = (bio->bi_iter.bi_sector >> 3);
bio               895 drivers/s390/block/dcssblk.c 	bio_for_each_segment(bvec, bio, iter) {
bio               902 drivers/s390/block/dcssblk.c 		if (bio_data_dir(bio) == READ) {
bio               911 drivers/s390/block/dcssblk.c 	bio_endio(bio);
bio               914 drivers/s390/block/dcssblk.c 	bio_io_error(bio);
bio               185 drivers/s390/block/xpram.c static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
bio               187 drivers/s390/block/xpram.c 	xpram_device_t *xdev = bio->bi_disk->private_data;
bio               194 drivers/s390/block/xpram.c 	blk_queue_split(q, &bio);
bio               196 drivers/s390/block/xpram.c 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
bio               197 drivers/s390/block/xpram.c 	    (bio->bi_iter.bi_size & 4095) != 0)
bio               200 drivers/s390/block/xpram.c 	if ((bio->bi_iter.bi_size >> 12) > xdev->size)
bio               203 drivers/s390/block/xpram.c 	if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
bio               205 drivers/s390/block/xpram.c 	index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
bio               206 drivers/s390/block/xpram.c 	bio_for_each_segment(bvec, bio, iter) {
bio               214 drivers/s390/block/xpram.c 			if (bio_data_dir(bio) == READ) {
bio               226 drivers/s390/block/xpram.c 	bio_endio(bio);
bio               229 drivers/s390/block/xpram.c 	bio_io_error(bio);
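dcssblk and xpram above share the classic shape of a bio-based (make_request) driver: split the bio, walk its segments, move the data, complete with bio_endio(). A rough sketch under the assumption that ->private_data holds a kernel virtual mapping of the backing memory (dev_base and ram_make_request are illustrative names, not s390 code):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>

static blk_qc_t ram_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *dev_base;
	u64 offset;

	blk_queue_split(q, &bio);

	dev_base = bio->bi_disk->private_data;	/* assumed kernel mapping */
	offset = (u64)bio->bi_iter.bi_sector << 9;

	bio_for_each_segment(bvec, bio, iter) {
		void *page_addr = kmap_atomic(bvec.bv_page);

		if (bio_data_dir(bio) == READ)
			memcpy(page_addr + bvec.bv_offset,
			       dev_base + offset, bvec.bv_len);
		else
			memcpy(dev_base + offset,
			       page_addr + bvec.bv_offset, bvec.bv_len);
		kunmap_atomic(page_addr);
		offset += bvec.bv_len;
	}
	bio_endio(bio);
	return BLK_QC_T_NONE;
}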
bio              1046 drivers/scsi/scsi_lib.c 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
bio              1055 drivers/scsi/scsi_lib.c 		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
bio              1176 drivers/scsi/scsi_lib.c 	if (req->bio) {
bio               735 drivers/scsi/sd.c 	struct bio *bio = scmd->request->bio;
bio               740 drivers/scsi/sd.c 		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
bio               743 drivers/scsi/sd.c 		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
bio               750 drivers/scsi/sd.c 		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
bio               757 drivers/scsi/sd.c 		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
bio              1020 drivers/scsi/sd.c 	struct bio *bio = rq->bio;
bio              1028 drivers/scsi/sd.c 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
bio               131 drivers/scsi/sg.c 	struct bio *bio;
bio               822 drivers/scsi/sg.c 		if (srp->bio) {
bio              1821 drivers/scsi/sg.c 		srp->bio = rq->bio;
bio              1842 drivers/scsi/sg.c 	if (srp->bio)
bio              1843 drivers/scsi/sg.c 		ret = blk_rq_unmap_user(srp->bio);
bio               351 drivers/scsi/sr.c 			if (SCpnt->request->bio != NULL)
bio               353 drivers/scsi/sr.c 					bio_sectors(SCpnt->request->bio);
bio               521 drivers/scsi/st.c 	struct bio *tmp;
bio               528 drivers/scsi/st.c 	tmp = SRpnt->bio;
bio               578 drivers/scsi/st.c 	SRpnt->bio = req->bio;
bio                33 drivers/scsi/st.h 	struct bio *bio;
bio               353 drivers/scsi/ufs/ufshcd.c 			if (lrbp->cmd->request && lrbp->cmd->request->bio)
bio               355 drivers/scsi/ufs/ufshcd.c 				  lrbp->cmd->request->bio->bi_iter.bi_sector;
bio               287 drivers/target/target_core_iblock.c static void iblock_bio_done(struct bio *bio)
bio               289 drivers/target/target_core_iblock.c 	struct se_cmd *cmd = bio->bi_private;
bio               292 drivers/target/target_core_iblock.c 	if (bio->bi_status) {
bio               293 drivers/target/target_core_iblock.c 		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
bio               301 drivers/target/target_core_iblock.c 	bio_put(bio);
bio               306 drivers/target/target_core_iblock.c static struct bio *
bio               311 drivers/target/target_core_iblock.c 	struct bio *bio;
bio               320 drivers/target/target_core_iblock.c 	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
bio               321 drivers/target/target_core_iblock.c 	if (!bio) {
bio               326 drivers/target/target_core_iblock.c 	bio_set_dev(bio, ib_dev->ibd_bd);
bio               327 drivers/target/target_core_iblock.c 	bio->bi_private = cmd;
bio               328 drivers/target/target_core_iblock.c 	bio->bi_end_io = &iblock_bio_done;
bio               329 drivers/target/target_core_iblock.c 	bio->bi_iter.bi_sector = lba;
bio               330 drivers/target/target_core_iblock.c 	bio_set_op_attrs(bio, op, op_flags);
bio               332 drivers/target/target_core_iblock.c 	return bio;
bio               338 drivers/target/target_core_iblock.c 	struct bio *bio;
bio               341 drivers/target/target_core_iblock.c 	while ((bio = bio_list_pop(list)))
bio               342 drivers/target/target_core_iblock.c 		submit_bio(bio);
bio               346 drivers/target/target_core_iblock.c static void iblock_end_io_flush(struct bio *bio)
bio               348 drivers/target/target_core_iblock.c 	struct se_cmd *cmd = bio->bi_private;
bio               350 drivers/target/target_core_iblock.c 	if (bio->bi_status)
bio               351 drivers/target/target_core_iblock.c 		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
bio               354 drivers/target/target_core_iblock.c 		if (bio->bi_status)
bio               360 drivers/target/target_core_iblock.c 	bio_put(bio);
bio               372 drivers/target/target_core_iblock.c 	struct bio *bio;
bio               381 drivers/target/target_core_iblock.c 	bio = bio_alloc(GFP_KERNEL, 0);
bio               382 drivers/target/target_core_iblock.c 	bio->bi_end_io = iblock_end_io_flush;
bio               383 drivers/target/target_core_iblock.c 	bio_set_dev(bio, ib_dev->ibd_bd);
bio               384 drivers/target/target_core_iblock.c 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio               386 drivers/target/target_core_iblock.c 		bio->bi_private = cmd;
bio               387 drivers/target/target_core_iblock.c 	submit_bio(bio);
bio               449 drivers/target/target_core_iblock.c 	struct bio *bio;
bio               481 drivers/target/target_core_iblock.c 	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
bio               482 drivers/target/target_core_iblock.c 	if (!bio)
bio               486 drivers/target/target_core_iblock.c 	bio_list_add(&list, bio);
bio               491 drivers/target/target_core_iblock.c 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
bio               494 drivers/target/target_core_iblock.c 			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
bio               496 drivers/target/target_core_iblock.c 			if (!bio)
bio               500 drivers/target/target_core_iblock.c 			bio_list_add(&list, bio);
bio               512 drivers/target/target_core_iblock.c 	while ((bio = bio_list_pop(&list)))
bio               513 drivers/target/target_core_iblock.c 		bio_put(bio);
bio               625 drivers/target/target_core_iblock.c iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
bio               641 drivers/target/target_core_iblock.c 	bip = bio_integrity_alloc(bio, GFP_NOIO,
bio               648 drivers/target/target_core_iblock.c 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
bio               650 drivers/target/target_core_iblock.c 	bip_set_seed(bip, bio->bi_iter.bi_sector >>
bio               660 drivers/target/target_core_iblock.c 		rc = bio_integrity_add_page(bio, miter->page, len,
bio               687 drivers/target/target_core_iblock.c 	struct bio *bio;
bio               724 drivers/target/target_core_iblock.c 	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
bio               725 drivers/target/target_core_iblock.c 	if (!bio)
bio               729 drivers/target/target_core_iblock.c 	bio_list_add(&list, bio);
bio               745 drivers/target/target_core_iblock.c 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
bio               748 drivers/target/target_core_iblock.c 				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
bio               758 drivers/target/target_core_iblock.c 			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
bio               760 drivers/target/target_core_iblock.c 			if (!bio)
bio               764 drivers/target/target_core_iblock.c 			bio_list_add(&list, bio);
bio               774 drivers/target/target_core_iblock.c 		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
bio               784 drivers/target/target_core_iblock.c 	while ((bio = bio_list_pop(&list)))
bio               785 drivers/target/target_core_iblock.c 		bio_put(bio);
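target_core_iblock.c above accumulates bios on a bio_list and either submits them all (under a plug, as its submit helper does) or, on an allocation failure, pops and drops them so no partially built chain escapes. A small sketch of that submit/unwind pair (helper names are illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_bio_list(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void abort_bio_list(struct bio_list *list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(list)))
		bio_put(bio);	/* mirrors the error path above */
}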
bio               823 drivers/target/target_core_pscsi.c static void pscsi_bi_endio(struct bio *bio)
bio               825 drivers/target/target_core_pscsi.c 	bio_put(bio);
bio               828 drivers/target/target_core_pscsi.c static inline struct bio *pscsi_get_bio(int nr_vecs)
bio               830 drivers/target/target_core_pscsi.c 	struct bio *bio;
bio               835 drivers/target/target_core_pscsi.c 	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
bio               836 drivers/target/target_core_pscsi.c 	if (!bio) {
bio               840 drivers/target/target_core_pscsi.c 	bio->bi_end_io = pscsi_bi_endio;
bio               842 drivers/target/target_core_pscsi.c 	return bio;
bio               850 drivers/target/target_core_pscsi.c 	struct bio *bio = NULL;
bio               882 drivers/target/target_core_pscsi.c 			if (!bio) {
bio               889 drivers/target/target_core_pscsi.c 				bio = pscsi_get_bio(nr_vecs);
bio               890 drivers/target/target_core_pscsi.c 				if (!bio)
bio               894 drivers/target/target_core_pscsi.c 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               897 drivers/target/target_core_pscsi.c 					" dir: %s nr_vecs: %d\n", bio,
bio               902 drivers/target/target_core_pscsi.c 				" bio: %p page: %p len: %d off: %d\n", i, bio,
bio               906 drivers/target/target_core_pscsi.c 					bio, page, bytes, off);
bio               908 drivers/target/target_core_pscsi.c 				bio_segments(bio), nr_vecs);
bio               912 drivers/target/target_core_pscsi.c 					" bio\n", bio->bi_vcnt, i, bio);
bio               914 drivers/target/target_core_pscsi.c 				rc = blk_rq_append_bio(req, &bio);
bio               924 drivers/target/target_core_pscsi.c 				bio = NULL;
bio               932 drivers/target/target_core_pscsi.c 	if (bio) {
bio               933 drivers/target/target_core_pscsi.c 		rc = blk_rq_append_bio(req, &bio);
bio               194 fs/block_dev.c static void blkdev_bio_end_io_simple(struct bio *bio)
bio               196 fs/block_dev.c 	struct task_struct *waiter = bio->bi_private;
bio               198 fs/block_dev.c 	WRITE_ONCE(bio->bi_private, NULL);
bio               211 fs/block_dev.c 	struct bio bio;
bio               228 fs/block_dev.c 	bio_init(&bio, vecs, nr_pages);
bio               229 fs/block_dev.c 	bio_set_dev(&bio, bdev);
bio               230 fs/block_dev.c 	bio.bi_iter.bi_sector = pos >> 9;
bio               231 fs/block_dev.c 	bio.bi_write_hint = iocb->ki_hint;
bio               232 fs/block_dev.c 	bio.bi_private = current;
bio               233 fs/block_dev.c 	bio.bi_end_io = blkdev_bio_end_io_simple;
bio               234 fs/block_dev.c 	bio.bi_ioprio = iocb->ki_ioprio;
bio               236 fs/block_dev.c 	ret = bio_iov_iter_get_pages(&bio, iter);
bio               239 fs/block_dev.c 	ret = bio.bi_iter.bi_size;
bio               242 fs/block_dev.c 		bio.bi_opf = REQ_OP_READ;
bio               246 fs/block_dev.c 		bio.bi_opf = dio_bio_write_op(iocb);
bio               250 fs/block_dev.c 		bio_set_polled(&bio, iocb);
bio               252 fs/block_dev.c 	qc = submit_bio(&bio);
bio               255 fs/block_dev.c 		if (!READ_ONCE(bio.bi_private))
bio               263 fs/block_dev.c 	bio_release_pages(&bio, should_dirty);
bio               264 fs/block_dev.c 	if (unlikely(bio.bi_status))
bio               265 fs/block_dev.c 		ret = blk_status_to_errno(bio.bi_status);
bio               271 fs/block_dev.c 	bio_uninit(&bio);
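__blkdev_direct_IO_simple above runs an entire direct I/O on a stack-allocated bio: bio_init() with caller-provided bvecs, synchronous completion, then bio_uninit() to drop the remaining references. A condensed sketch of the same idea that waits with submit_bio_wait() instead of the original's polling loop (read_page_sync is a hypothetical helper):

#include <linux/bio.h>

static int read_page_sync(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);	/* one vec, always fits */

	ret = submit_bio_wait(&bio);	/* sets up bi_end_io internally */
	bio_uninit(&bio);
	return ret;
}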
bio               286 fs/block_dev.c 	struct bio		bio;
bio               299 fs/block_dev.c static void blkdev_bio_end_io(struct bio *bio)
bio               301 fs/block_dev.c 	struct blkdev_dio *dio = bio->bi_private;
bio               304 fs/block_dev.c 	if (bio->bi_status && !dio->bio.bi_status)
bio               305 fs/block_dev.c 		dio->bio.bi_status = bio->bi_status;
bio               312 fs/block_dev.c 			if (likely(!dio->bio.bi_status)) {
bio               316 fs/block_dev.c 				ret = blk_status_to_errno(dio->bio.bi_status);
bio               321 fs/block_dev.c 				bio_put(&dio->bio);
bio               331 fs/block_dev.c 		bio_check_pages_dirty(bio);
bio               333 fs/block_dev.c 		bio_release_pages(bio, false);
bio               334 fs/block_dev.c 		bio_put(bio);
bio               346 fs/block_dev.c 	struct bio *bio;
bio               357 fs/block_dev.c 	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
bio               359 fs/block_dev.c 	dio = container_of(bio, struct blkdev_dio, bio);
bio               363 fs/block_dev.c 		bio_get(bio);
bio               380 fs/block_dev.c 		bio_set_dev(bio, bdev);
bio               381 fs/block_dev.c 		bio->bi_iter.bi_sector = pos >> 9;
bio               382 fs/block_dev.c 		bio->bi_write_hint = iocb->ki_hint;
bio               383 fs/block_dev.c 		bio->bi_private = dio;
bio               384 fs/block_dev.c 		bio->bi_end_io = blkdev_bio_end_io;
bio               385 fs/block_dev.c 		bio->bi_ioprio = iocb->ki_ioprio;
bio               387 fs/block_dev.c 		ret = bio_iov_iter_get_pages(bio, iter);
bio               389 fs/block_dev.c 			bio->bi_status = BLK_STS_IOERR;
bio               390 fs/block_dev.c 			bio_endio(bio);
bio               395 fs/block_dev.c 			bio->bi_opf = REQ_OP_READ;
bio               397 fs/block_dev.c 				bio_set_pages_dirty(bio);
bio               399 fs/block_dev.c 			bio->bi_opf = dio_bio_write_op(iocb);
bio               400 fs/block_dev.c 			task_io_account_write(bio->bi_iter.bi_size);
bio               403 fs/block_dev.c 		dio->size += bio->bi_iter.bi_size;
bio               404 fs/block_dev.c 		pos += bio->bi_iter.bi_size;
bio               411 fs/block_dev.c 				bio_set_polled(bio, iocb);
bio               415 fs/block_dev.c 			qc = submit_bio(bio);
bio               429 fs/block_dev.c 				bio_get(bio);
bio               436 fs/block_dev.c 		submit_bio(bio);
bio               437 fs/block_dev.c 		bio = bio_alloc(GFP_KERNEL, nr_pages);
bio               458 fs/block_dev.c 		ret = blk_status_to_errno(dio->bio.bi_status);
bio               462 fs/block_dev.c 	bio_put(&dio->bio);
bio               482 fs/block_dev.c 	return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
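The blkdev_dio lines above show the standard trick for attaching per-I/O state to a bio without a separate allocation: embed the struct bio at the end of a container, create the bioset with front_pad equal to the bio's offset, and recover the container with container_of() at completion. A sketch with illustrative names:

#include <linux/bio.h>

struct my_dio {
	void		*cookie;	/* per-I/O state, assumption */
	struct bio	bio;		/* must be the last member */
};

static struct bio_set my_bioset;

static int my_dio_init(void)
{
	return bioset_init(&my_bioset, 4, offsetof(struct my_dio, bio),
			   BIOSET_NEED_BVECS);
}

static void my_dio_end_io(struct bio *bio)
{
	struct my_dio *dio = container_of(bio, struct my_dio, bio);

	/* ... complete the I/O using dio->cookie ... */
	bio_put(&dio->bio);
}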
bio               310 fs/btrfs/btrfs_inode.h 	struct bio *orig_bio;
bio               313 fs/btrfs/btrfs_inode.h 	struct bio *dio_bio;
bio               157 fs/btrfs/check-integrity.c 		bio_end_io_t *bio;
bio               327 fs/btrfs/check-integrity.c 					  struct bio *bio, int *bio_is_patched,
bio               334 fs/btrfs/check-integrity.c static void btrfsic_bio_end_io(struct bio *bp);
bio               403 fs/btrfs/check-integrity.c 	b->orig_bio_bh_end_io.bio = NULL;
bio              1625 fs/btrfs/check-integrity.c 		struct bio *bio;
bio              1628 fs/btrfs/check-integrity.c 		bio = btrfs_io_bio_alloc(num_pages - i);
bio              1629 fs/btrfs/check-integrity.c 		bio_set_dev(bio, block_ctx->dev->bdev);
bio              1630 fs/btrfs/check-integrity.c 		bio->bi_iter.bi_sector = dev_bytenr >> 9;
bio              1631 fs/btrfs/check-integrity.c 		bio->bi_opf = REQ_OP_READ;
bio              1634 fs/btrfs/check-integrity.c 			ret = bio_add_page(bio, block_ctx->pagev[j],
bio              1643 fs/btrfs/check-integrity.c 		if (submit_bio_wait(bio)) {
bio              1646 fs/btrfs/check-integrity.c 			bio_put(bio);
bio              1649 fs/btrfs/check-integrity.c 		bio_put(bio);
bio              1746 fs/btrfs/check-integrity.c 					  struct bio *bio, int *bio_is_patched,
bio              1902 fs/btrfs/check-integrity.c 			if (NULL != bio) {
bio              1907 fs/btrfs/check-integrity.c 					    bio->bi_private;
bio              1908 fs/btrfs/check-integrity.c 					block->orig_bio_bh_end_io.bio =
bio              1909 fs/btrfs/check-integrity.c 					    bio->bi_end_io;
bio              1911 fs/btrfs/check-integrity.c 					bio->bi_private = block;
bio              1912 fs/btrfs/check-integrity.c 					bio->bi_end_io = btrfsic_bio_end_io;
bio              1917 fs/btrfs/check-integrity.c 					    bio->bi_private;
bio              1922 fs/btrfs/check-integrity.c 					block->orig_bio_bh_end_io.bio =
bio              1924 fs/btrfs/check-integrity.c 					    bio;
bio              1926 fs/btrfs/check-integrity.c 					bio->bi_private = block;
bio              1938 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio = NULL;
bio              2042 fs/btrfs/check-integrity.c 		if (NULL != bio) {
bio              2046 fs/btrfs/check-integrity.c 				block->orig_bio_bh_private = bio->bi_private;
bio              2047 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio = bio->bi_end_io;
bio              2049 fs/btrfs/check-integrity.c 				bio->bi_private = block;
bio              2050 fs/btrfs/check-integrity.c 				bio->bi_end_io = btrfsic_bio_end_io;
bio              2055 fs/btrfs/check-integrity.c 				    bio->bi_private;
bio              2060 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio =
bio              2061 fs/btrfs/check-integrity.c 				    chained_block->orig_bio_bh_end_io.bio;
bio              2063 fs/btrfs/check-integrity.c 				bio->bi_private = block;
bio              2075 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bio = NULL;
bio              2104 fs/btrfs/check-integrity.c static void btrfsic_bio_end_io(struct bio *bp)
bio              2117 fs/btrfs/check-integrity.c 	bp->bi_end_io = block->orig_bio_bh_end_io.bio;
bio              2791 fs/btrfs/check-integrity.c static void __btrfsic_submit_bio(struct bio *bio)
bio              2801 fs/btrfs/check-integrity.c 	dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno);
bio              2803 fs/btrfs/check-integrity.c 	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
bio              2811 fs/btrfs/check-integrity.c 		unsigned int segs = bio_segments(bio);
bio              2813 fs/btrfs/check-integrity.c 		dev_bytenr = 512 * bio->bi_iter.bi_sector;
bio              2818 fs/btrfs/check-integrity.c 			       bio_op(bio), bio->bi_opf, segs,
bio              2819 fs/btrfs/check-integrity.c 			       (unsigned long long)bio->bi_iter.bi_sector,
bio              2820 fs/btrfs/check-integrity.c 			       dev_bytenr, bio->bi_disk);
bio              2828 fs/btrfs/check-integrity.c 		bio_for_each_segment(bvec, bio, iter) {
bio              2841 fs/btrfs/check-integrity.c 					      bio, &bio_is_patched,
bio              2842 fs/btrfs/check-integrity.c 					      NULL, bio->bi_opf);
bio              2843 fs/btrfs/check-integrity.c 		bio_for_each_segment(bvec, bio, iter)
bio              2846 fs/btrfs/check-integrity.c 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
bio              2850 fs/btrfs/check-integrity.c 			       bio_op(bio), bio->bi_opf, bio->bi_disk);
bio              2865 fs/btrfs/check-integrity.c 			block->submit_bio_bh_rw = bio->bi_opf;
bio              2866 fs/btrfs/check-integrity.c 			block->orig_bio_bh_private = bio->bi_private;
bio              2867 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
bio              2869 fs/btrfs/check-integrity.c 			bio->bi_private = block;
bio              2870 fs/btrfs/check-integrity.c 			bio->bi_end_io = btrfsic_bio_end_io;
bio              2877 fs/btrfs/check-integrity.c void btrfsic_submit_bio(struct bio *bio)
bio              2879 fs/btrfs/check-integrity.c 	__btrfsic_submit_bio(bio);
bio              2880 fs/btrfs/check-integrity.c 	submit_bio(bio);
bio              2883 fs/btrfs/check-integrity.c int btrfsic_submit_bio_wait(struct bio *bio)
bio              2885 fs/btrfs/check-integrity.c 	__btrfsic_submit_bio(bio);
bio              2886 fs/btrfs/check-integrity.c 	return submit_bio_wait(bio);
bio                11 fs/btrfs/check-integrity.h void btrfsic_submit_bio(struct bio *bio);
bio                12 fs/btrfs/check-integrity.h int btrfsic_submit_bio_wait(struct bio *bio);
bio               126 fs/btrfs/compression.c static void end_compressed_bio_read(struct bio *bio)
bio               128 fs/btrfs/compression.c 	struct compressed_bio *cb = bio->bi_private;
bio               132 fs/btrfs/compression.c 	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
bio               135 fs/btrfs/compression.c 	if (bio->bi_status)
bio               161 fs/btrfs/compression.c 				    (u64)bio->bi_iter.bi_sector << 9);
bio               193 fs/btrfs/compression.c 		ASSERT(!bio_flagged(bio, BIO_CLONED));
bio               204 fs/btrfs/compression.c 	bio_put(bio);
bio               253 fs/btrfs/compression.c static void end_compressed_bio_write(struct bio *bio)
bio               255 fs/btrfs/compression.c 	struct compressed_bio *cb = bio->bi_private;
bio               260 fs/btrfs/compression.c 	if (bio->bi_status)
bio               276 fs/btrfs/compression.c 			bio->bi_status == BLK_STS_OK);
bio               297 fs/btrfs/compression.c 	bio_put(bio);
bio               317 fs/btrfs/compression.c 	struct bio *bio = NULL;
bio               344 fs/btrfs/compression.c 	bio = btrfs_bio_alloc(first_byte);
bio               345 fs/btrfs/compression.c 	bio_set_dev(bio, bdev);
bio               346 fs/btrfs/compression.c 	bio->bi_opf = REQ_OP_WRITE | write_flags;
bio               347 fs/btrfs/compression.c 	bio->bi_private = cb;
bio               348 fs/btrfs/compression.c 	bio->bi_end_io = end_compressed_bio_write;
bio               358 fs/btrfs/compression.c 		if (bio->bi_iter.bi_size)
bio               359 fs/btrfs/compression.c 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
bio               363 fs/btrfs/compression.c 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
bio               372 fs/btrfs/compression.c 			ret = btrfs_bio_wq_end_io(fs_info, bio,
bio               377 fs/btrfs/compression.c 				ret = btrfs_csum_one_bio(inode, bio, start, 1);
bio               381 fs/btrfs/compression.c 			ret = btrfs_map_bio(fs_info, bio, 0, 1);
bio               383 fs/btrfs/compression.c 				bio->bi_status = ret;
bio               384 fs/btrfs/compression.c 				bio_endio(bio);
bio               387 fs/btrfs/compression.c 			bio = btrfs_bio_alloc(first_byte);
bio               388 fs/btrfs/compression.c 			bio_set_dev(bio, bdev);
bio               389 fs/btrfs/compression.c 			bio->bi_opf = REQ_OP_WRITE | write_flags;
bio               390 fs/btrfs/compression.c 			bio->bi_private = cb;
bio               391 fs/btrfs/compression.c 			bio->bi_end_io = end_compressed_bio_write;
bio               392 fs/btrfs/compression.c 			bio_add_page(bio, page, PAGE_SIZE, 0);
bio               404 fs/btrfs/compression.c 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
bio               408 fs/btrfs/compression.c 		ret = btrfs_csum_one_bio(inode, bio, start, 1);
bio               412 fs/btrfs/compression.c 	ret = btrfs_map_bio(fs_info, bio, 0, 1);
bio               414 fs/btrfs/compression.c 		bio->bi_status = ret;
bio               415 fs/btrfs/compression.c 		bio_endio(bio);
bio               421 fs/btrfs/compression.c static u64 bio_end_offset(struct bio *bio)
bio               423 fs/btrfs/compression.c 	struct bio_vec *last = bio_last_bvec_all(bio);
bio               546 fs/btrfs/compression.c blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio               557 fs/btrfs/compression.c 	struct bio *comp_bio;
bio               558 fs/btrfs/compression.c 	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
bio               572 fs/btrfs/compression.c 				   page_offset(bio_first_page_all(bio)),
bio               596 fs/btrfs/compression.c 	cb->len = bio->bi_iter.bi_size;
bio               599 fs/btrfs/compression.c 	cb->orig_bio = bio;
bio               624 fs/btrfs/compression.c 	cb->len = bio->bi_iter.bi_size;
bio              1125 fs/btrfs/compression.c 			      struct bio *bio)
bio              1134 fs/btrfs/compression.c 	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
bio              1174 fs/btrfs/compression.c 		bio_advance(bio, bytes);
bio              1175 fs/btrfs/compression.c 		if (!bio->bi_iter.bi_size)
bio              1177 fs/btrfs/compression.c 		bvec = bio_iter_iovec(bio, bio->bi_iter);
bio                58 fs/btrfs/compression.h 	struct bio *orig_bio;
bio                89 fs/btrfs/compression.h 			      struct bio *bio);
bio                97 fs/btrfs/compression.h blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
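btrfs_decompress_buf2page() above walks a bio as a consumer: peek at the current bvec with bio_iter_iovec(), copy into it, then bio_advance() until bi_size reaches zero. A self-contained sketch of that loop (copy_to_bio is a hypothetical helper, not btrfs code):

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/kernel.h>

static void copy_to_bio(struct bio *bio, const char *src, size_t len)
{
	while (len && bio->bi_iter.bi_size) {
		struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
		size_t bytes = min_t(size_t, len, bvec.bv_len);
		char *dst = kmap_atomic(bvec.bv_page);

		memcpy(dst + bvec.bv_offset, src, bytes);
		kunmap_atomic(dst);

		src += bytes;
		len -= bytes;
		bio_advance(bio, bytes);	/* moves bi_iter forward */
	}
}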
bio              2787 fs/btrfs/ctree.h blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
bio              2789 fs/btrfs/ctree.h blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
bio              2804 fs/btrfs/ctree.h blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
bio              2857 fs/btrfs/ctree.h int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
bio                72 fs/btrfs/disk-io.c 	struct bio *bio;
bio               107 fs/btrfs/disk-io.c 	struct bio *bio;
bio               704 fs/btrfs/disk-io.c static void end_workqueue_bio(struct bio *bio)
bio               706 fs/btrfs/disk-io.c 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
bio               711 fs/btrfs/disk-io.c 	end_io_wq->status = bio->bi_status;
bio               713 fs/btrfs/disk-io.c 	if (bio_op(bio) == REQ_OP_WRITE) {
bio               737 fs/btrfs/disk-io.c blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
bio               746 fs/btrfs/disk-io.c 	end_io_wq->private = bio->bi_private;
bio               747 fs/btrfs/disk-io.c 	end_io_wq->end_io = bio->bi_end_io;
bio               750 fs/btrfs/disk-io.c 	end_io_wq->bio = bio;
bio               753 fs/btrfs/disk-io.c 	bio->bi_private = end_io_wq;
bio               754 fs/btrfs/disk-io.c 	bio->bi_end_io = end_workqueue_bio;
bio               764 fs/btrfs/disk-io.c 	ret = async->submit_bio_start(async->private_data, async->bio,
bio               789 fs/btrfs/disk-io.c 		async->bio->bi_status = async->status;
bio               790 fs/btrfs/disk-io.c 		bio_endio(async->bio);
bio               794 fs/btrfs/disk-io.c 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
bio               797 fs/btrfs/disk-io.c 		async->bio->bi_status = ret;
bio               798 fs/btrfs/disk-io.c 		bio_endio(async->bio);
bio               810 fs/btrfs/disk-io.c blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio               822 fs/btrfs/disk-io.c 	async->bio = bio;
bio               833 fs/btrfs/disk-io.c 	if (op_is_sync(bio->bi_opf))
bio               840 fs/btrfs/disk-io.c static blk_status_t btree_csum_one_bio(struct bio *bio)
bio               847 fs/btrfs/disk-io.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio               848 fs/btrfs/disk-io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               858 fs/btrfs/disk-io.c static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
bio               865 fs/btrfs/disk-io.c 	return btree_csum_one_bio(bio);
bio               878 fs/btrfs/disk-io.c static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
bio               886 fs/btrfs/disk-io.c 	if (bio_op(bio) != REQ_OP_WRITE) {
bio               891 fs/btrfs/disk-io.c 		ret = btrfs_bio_wq_end_io(fs_info, bio,
bio               895 fs/btrfs/disk-io.c 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
bio               897 fs/btrfs/disk-io.c 		ret = btree_csum_one_bio(bio);
bio               900 fs/btrfs/disk-io.c 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
bio               906 fs/btrfs/disk-io.c 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
bio               915 fs/btrfs/disk-io.c 	bio->bi_status = ret;
bio               916 fs/btrfs/disk-io.c 	bio_endio(bio);
bio              1639 fs/btrfs/disk-io.c 	struct bio *bio;
bio              1643 fs/btrfs/disk-io.c 	bio = end_io_wq->bio;
bio              1645 fs/btrfs/disk-io.c 	bio->bi_status = end_io_wq->status;
bio              1646 fs/btrfs/disk-io.c 	bio->bi_private = end_io_wq->private;
bio              1647 fs/btrfs/disk-io.c 	bio->bi_end_io = end_io_wq->end_io;
bio              1648 fs/btrfs/disk-io.c 	bio_endio(bio);
bio              3610 fs/btrfs/disk-io.c static void btrfs_end_empty_barrier(struct bio *bio)
bio              3612 fs/btrfs/disk-io.c 	complete(bio->bi_private);
bio              3622 fs/btrfs/disk-io.c 	struct bio *bio = device->flush_bio;
bio              3627 fs/btrfs/disk-io.c 	bio_reset(bio);
bio              3628 fs/btrfs/disk-io.c 	bio->bi_end_io = btrfs_end_empty_barrier;
bio              3629 fs/btrfs/disk-io.c 	bio_set_dev(bio, device->bdev);
bio              3630 fs/btrfs/disk-io.c 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
bio              3632 fs/btrfs/disk-io.c 	bio->bi_private = &device->flush_wait;
bio              3634 fs/btrfs/disk-io.c 	btrfsic_submit_bio(bio);
bio              3643 fs/btrfs/disk-io.c 	struct bio *bio = device->flush_bio;
bio              3651 fs/btrfs/disk-io.c 	return bio->bi_status;
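The barrier code above issues a data-less flush: a preallocated bio is bio_reset() and resubmitted with REQ_OP_WRITE | REQ_PREFLUSH, and completion is signalled through a struct completion. A sketch that waits synchronously rather than splitting submit and wait as btrfs does (issue_cache_flush is an illustrative name; the preallocated bio is an assumption carried over from the code above):

#include <linux/bio.h>
#include <linux/completion.h>

static void flush_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static blk_status_t issue_cache_flush(struct block_device *bdev,
				      struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio_reset(bio);			/* reuse the preallocated bio */
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_private = &done;
	bio->bi_end_io = flush_end_io;

	submit_bio(bio);
	wait_for_completion_io(&done);
	return bio->bi_status;
}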
bio               116 fs/btrfs/disk-io.h blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
bio               118 fs/btrfs/disk-io.h blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio               122 fs/btrfs/disk-io.h blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
bio               119 fs/btrfs/extent_io.c 	struct bio *bio;
bio               148 fs/btrfs/extent_io.c static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
bio               152 fs/btrfs/extent_io.c 	struct extent_io_tree *tree = bio->bi_private;
bio               154 fs/btrfs/extent_io.c 	bio->bi_private = NULL;
bio               157 fs/btrfs/extent_io.c 		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
bio               160 fs/btrfs/extent_io.c 		btrfsic_submit_bio(bio);
bio               168 fs/btrfs/extent_io.c 	if (epd->bio) {
bio               169 fs/btrfs/extent_io.c 		epd->bio->bi_status = errno_to_blk_status(ret);
bio               170 fs/btrfs/extent_io.c 		bio_endio(epd->bio);
bio               171 fs/btrfs/extent_io.c 		epd->bio = NULL;
bio               185 fs/btrfs/extent_io.c 	if (epd->bio) {
bio               186 fs/btrfs/extent_io.c 		ret = submit_one_bio(epd->bio, 0, 0);
bio               194 fs/btrfs/extent_io.c 		epd->bio = NULL;
bio               214 fs/btrfs/extent_io.c 			offsetof(struct btrfs_io_bio, bio),
bio              2183 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2193 fs/btrfs/extent_io.c 	bio = btrfs_io_bio_alloc(1);
bio              2194 fs/btrfs/extent_io.c 	bio->bi_iter.bi_size = 0;
bio              2214 fs/btrfs/extent_io.c 			bio_put(bio);
bio              2223 fs/btrfs/extent_io.c 			bio_put(bio);
bio              2230 fs/btrfs/extent_io.c 	bio->bi_iter.bi_sector = sector;
bio              2236 fs/btrfs/extent_io.c 		bio_put(bio);
bio              2239 fs/btrfs/extent_io.c 	bio_set_dev(bio, dev->bdev);
bio              2240 fs/btrfs/extent_io.c 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio              2241 fs/btrfs/extent_io.c 	bio_add_page(bio, page, length, pg_offset);
bio              2243 fs/btrfs/extent_io.c 	if (btrfsic_submit_bio_wait(bio)) {
bio              2246 fs/btrfs/extent_io.c 		bio_put(bio);
bio              2256 fs/btrfs/extent_io.c 	bio_put(bio);
bio              2528 fs/btrfs/extent_io.c struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio              2534 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2538 fs/btrfs/extent_io.c 	bio = btrfs_io_bio_alloc(1);
bio              2539 fs/btrfs/extent_io.c 	bio->bi_end_io = endio_func;
bio              2540 fs/btrfs/extent_io.c 	bio->bi_iter.bi_sector = failrec->logical >> 9;
bio              2541 fs/btrfs/extent_io.c 	bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
bio              2542 fs/btrfs/extent_io.c 	bio->bi_iter.bi_size = 0;
bio              2543 fs/btrfs/extent_io.c 	bio->bi_private = data;
bio              2549 fs/btrfs/extent_io.c 		btrfs_bio = btrfs_io_bio(bio);
bio              2556 fs/btrfs/extent_io.c 	bio_add_page(bio, page, failrec->len, pg_offset);
bio              2558 fs/btrfs/extent_io.c 	return bio;
bio              2567 fs/btrfs/extent_io.c static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
bio              2575 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2597 fs/btrfs/extent_io.c 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
bio              2601 fs/btrfs/extent_io.c 	bio->bi_opf = REQ_OP_READ | read_mode;
bio              2607 fs/btrfs/extent_io.c 	status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
bio              2611 fs/btrfs/extent_io.c 		bio_put(bio);
bio              2644 fs/btrfs/extent_io.c static void end_bio_extent_writepage(struct bio *bio)
bio              2646 fs/btrfs/extent_io.c 	int error = blk_status_to_errno(bio->bi_status);
bio              2652 fs/btrfs/extent_io.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              2653 fs/btrfs/extent_io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              2681 fs/btrfs/extent_io.c 	bio_put(bio);
bio              2707 fs/btrfs/extent_io.c static void end_bio_extent_readpage(struct bio *bio)
bio              2710 fs/btrfs/extent_io.c 	int uptodate = !bio->bi_status;
bio              2711 fs/btrfs/extent_io.c 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
bio              2723 fs/btrfs/extent_io.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              2724 fs/btrfs/extent_io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              2733 fs/btrfs/extent_io.c 			(u64)bio->bi_iter.bi_sector, bio->bi_status,
bio              2787 fs/btrfs/extent_io.c 			ret = bio_readpage_error(bio, offset, page, start, end,
bio              2790 fs/btrfs/extent_io.c 				uptodate = !bio->bi_status;
bio              2850 fs/btrfs/extent_io.c 	bio_put(bio);
bio              2860 fs/btrfs/extent_io.c 	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
bio              2868 fs/btrfs/extent_io.c struct bio *btrfs_bio_alloc(u64 first_byte)
bio              2870 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2872 fs/btrfs/extent_io.c 	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
bio              2873 fs/btrfs/extent_io.c 	bio->bi_iter.bi_sector = first_byte >> 9;
bio              2874 fs/btrfs/extent_io.c 	btrfs_io_bio_init(btrfs_io_bio(bio));
bio              2875 fs/btrfs/extent_io.c 	return bio;
bio              2878 fs/btrfs/extent_io.c struct bio *btrfs_bio_clone(struct bio *bio)
bio              2881 fs/btrfs/extent_io.c 	struct bio *new;
bio              2884 fs/btrfs/extent_io.c 	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
bio              2887 fs/btrfs/extent_io.c 	btrfs_bio->iter = bio->bi_iter;
bio              2891 fs/btrfs/extent_io.c struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
bio              2893 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2896 fs/btrfs/extent_io.c 	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
bio              2897 fs/btrfs/extent_io.c 	btrfs_io_bio_init(btrfs_io_bio(bio));
bio              2898 fs/btrfs/extent_io.c 	return bio;
bio              2901 fs/btrfs/extent_io.c struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
bio              2903 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2907 fs/btrfs/extent_io.c 	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
bio              2908 fs/btrfs/extent_io.c 	ASSERT(bio);
bio              2910 fs/btrfs/extent_io.c 	btrfs_bio = btrfs_io_bio(bio);
bio              2913 fs/btrfs/extent_io.c 	bio_trim(bio, offset >> 9, size >> 9);
bio              2914 fs/btrfs/extent_io.c 	btrfs_bio->iter = bio->bi_iter;
bio              2915 fs/btrfs/extent_io.c 	return bio;
bio              2939 fs/btrfs/extent_io.c 			      struct bio **bio_ret,
bio              2947 fs/btrfs/extent_io.c 	struct bio *bio;
bio              2957 fs/btrfs/extent_io.c 		bio = *bio_ret;
bio              2959 fs/btrfs/extent_io.c 			contig = bio->bi_iter.bi_sector == sector;
bio              2961 fs/btrfs/extent_io.c 			contig = bio_end_sector(bio) == sector;
bio              2964 fs/btrfs/extent_io.c 		if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
bio              2969 fs/btrfs/extent_io.c 		    bio_add_page(bio, page, page_size, pg_offset) < page_size) {
bio              2970 fs/btrfs/extent_io.c 			ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
bio              2975 fs/btrfs/extent_io.c 			bio = NULL;
bio              2983 fs/btrfs/extent_io.c 	bio = btrfs_bio_alloc(offset);
bio              2984 fs/btrfs/extent_io.c 	bio_set_dev(bio, bdev);
bio              2985 fs/btrfs/extent_io.c 	bio_add_page(bio, page, page_size, pg_offset);
bio              2986 fs/btrfs/extent_io.c 	bio->bi_end_io = end_io_func;
bio              2987 fs/btrfs/extent_io.c 	bio->bi_private = tree;
bio              2988 fs/btrfs/extent_io.c 	bio->bi_write_hint = page->mapping->host->i_write_hint;
bio              2989 fs/btrfs/extent_io.c 	bio->bi_opf = opf;
bio              2991 fs/btrfs/extent_io.c 		wbc_init_bio(wbc, bio);
bio              2995 fs/btrfs/extent_io.c 	*bio_ret = bio;
bio              3059 fs/btrfs/extent_io.c 			 struct bio **bio, int mirror_num,
bio              3239 fs/btrfs/extent_io.c 					 pg_offset, bdev, bio,
bio              3268 fs/btrfs/extent_io.c 					     struct bio **bio,
bio              3279 fs/btrfs/extent_io.c 				bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
bio              3287 fs/btrfs/extent_io.c 				   struct bio **bio, int mirror_num,
bio              3298 fs/btrfs/extent_io.c 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
bio              3306 fs/btrfs/extent_io.c 	struct bio *bio = NULL;
bio              3310 fs/btrfs/extent_io.c 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
bio              3312 fs/btrfs/extent_io.c 	if (bio)
bio              3313 fs/btrfs/extent_io.c 		ret = submit_one_bio(bio, mirror_num, bio_flags);
bio              3527 fs/btrfs/extent_io.c 					 bdev, &epd->bio,
bio              3819 fs/btrfs/extent_io.c static void end_bio_extent_buffer_writepage(struct bio *bio)
bio              3826 fs/btrfs/extent_io.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              3827 fs/btrfs/extent_io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              3834 fs/btrfs/extent_io.c 		if (bio->bi_status ||
bio              3848 fs/btrfs/extent_io.c 	bio_put(bio);
bio              3892 fs/btrfs/extent_io.c 					 &epd->bio,
bio              3926 fs/btrfs/extent_io.c 		.bio = NULL,
bio              4248 fs/btrfs/extent_io.c 		.bio = NULL,
bio              4277 fs/btrfs/extent_io.c 		.bio = NULL,
bio              4316 fs/btrfs/extent_io.c 		.bio = NULL,
bio              4335 fs/btrfs/extent_io.c 	struct bio *bio = NULL;
bio              4367 fs/btrfs/extent_io.c 				     contig_end, &em_cached, &bio, &bio_flags,
bio              4375 fs/btrfs/extent_io.c 	if (bio)
bio              4376 fs/btrfs/extent_io.c 		return submit_one_bio(bio, 0, bio_flags);
bio              5457 fs/btrfs/extent_io.c 	struct bio *bio = NULL;
bio              5508 fs/btrfs/extent_io.c 						      btree_get_extent, &bio,
bio              5528 fs/btrfs/extent_io.c 	if (bio) {
bio              5529 fs/btrfs/extent_io.c 		err = submit_one_bio(bio, mirror_num, bio_flags);
bio               100 fs/btrfs/extent_io.h 		struct bio *bio, u64 bio_offset);
bio               107 fs/btrfs/extent_io.h 	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
bio               500 fs/btrfs/extent_io.h struct bio *btrfs_bio_alloc(u64 first_byte);
bio               501 fs/btrfs/extent_io.h struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
bio               502 fs/btrfs/extent_io.h struct bio *btrfs_bio_clone(struct bio *bio);
bio               503 fs/btrfs/extent_io.h struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);
bio               544 fs/btrfs/extent_io.h struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
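btrfs_bio_clone_partial() above narrows a shared-payload clone to a byte range: bio_clone_fast() reuses the parent's bvec array and bio_trim() (which takes sectors) adjusts the clone's iterator. A minimal sketch, assuming the caller owns a suitable bio_set (clone_partial is an illustrative name):

#include <linux/bio.h>

static struct bio *clone_partial(struct bio *orig, int offset, int size,
				 struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_clone_fast(orig, GFP_NOFS, bs);
	if (!clone)
		return NULL;

	bio_trim(clone, offset >> 9, size >> 9);	/* bytes -> sectors */
	return clone;
}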
bio               151 fs/btrfs/file-item.c static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
bio               157 fs/btrfs/file-item.c 	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
bio               176 fs/btrfs/file-item.c 	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
bio               193 fs/btrfs/file-item.c 	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
bio               207 fs/btrfs/file-item.c 	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
bio               211 fs/btrfs/file-item.c 	bio_for_each_segment(bvec, bio, iter) {
bio               292 fs/btrfs/file-item.c blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
bio               295 fs/btrfs/file-item.c 	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
bio               298 fs/btrfs/file-item.c blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
bio               300 fs/btrfs/file-item.c 	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
bio               433 fs/btrfs/file-item.c blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
bio               453 fs/btrfs/file-item.c 	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
bio               460 fs/btrfs/file-item.c 	sums->len = bio->bi_iter.bi_size;
bio               468 fs/btrfs/file-item.c 	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
bio               473 fs/btrfs/file-item.c 	bio_for_each_segment(bvec, bio, iter) {
bio               496 fs/btrfs/file-item.c 				bytes_left = bio->bi_iter.bi_size - total_bytes;
bio               507 fs/btrfs/file-item.c 				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
bio              2017 fs/btrfs/inode.c int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
bio              2022 fs/btrfs/inode.c 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
bio              2031 fs/btrfs/inode.c 	length = bio->bi_iter.bi_size;
bio              2033 fs/btrfs/inode.c 	ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length,
bio              2051 fs/btrfs/inode.c static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
bio              2057 fs/btrfs/inode.c 	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
bio              2080 fs/btrfs/inode.c static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
bio              2097 fs/btrfs/inode.c 	if (bio_op(bio) != REQ_OP_WRITE) {
bio              2098 fs/btrfs/inode.c 		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
bio              2103 fs/btrfs/inode.c 			ret = btrfs_submit_compressed_read(inode, bio,
bio              2108 fs/btrfs/inode.c 			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
bio              2118 fs/btrfs/inode.c 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
bio              2122 fs/btrfs/inode.c 		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
bio              2128 fs/btrfs/inode.c 	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
bio              2132 fs/btrfs/inode.c 		bio->bi_status = ret;
bio              2133 fs/btrfs/inode.c 		bio_endio(bio);
bio              7956 fs/btrfs/inode.c 						 struct bio *bio,
bio              7962 fs/btrfs/inode.c 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
bio              7964 fs/btrfs/inode.c 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
bio              7968 fs/btrfs/inode.c 	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
bio              7974 fs/btrfs/inode.c 				      struct bio *failed_bio,
bio              8009 fs/btrfs/inode.c static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
bio              8017 fs/btrfs/inode.c 	struct bio *bio;
bio              8046 fs/btrfs/inode.c 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
bio              8048 fs/btrfs/inode.c 	bio->bi_opf = REQ_OP_READ | read_mode;
bio              8054 fs/btrfs/inode.c 	status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
bio              8057 fs/btrfs/inode.c 		bio_put(bio);
bio              8070 fs/btrfs/inode.c static void btrfs_retry_endio_nocsum(struct bio *bio)
bio              8072 fs/btrfs/inode.c 	struct btrfs_retry_complete *done = bio->bi_private;
bio              8078 fs/btrfs/inode.c 	if (bio->bi_status)
bio              8081 fs/btrfs/inode.c 	ASSERT(bio->bi_vcnt == 1);
bio              8084 fs/btrfs/inode.c 	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
bio              8087 fs/btrfs/inode.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              8088 fs/btrfs/inode.c 	bio_for_each_segment_all(bvec, bio, iter_all)
bio              8094 fs/btrfs/inode.c 	bio_put(bio);
bio              8116 fs/btrfs/inode.c 	io_bio->bio.bi_iter = io_bio->iter;
bio              8118 fs/btrfs/inode.c 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
bio              8127 fs/btrfs/inode.c 		ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
bio              8157 fs/btrfs/inode.c static void btrfs_retry_endio(struct bio *bio)
bio              8159 fs/btrfs/inode.c 	struct btrfs_retry_complete *done = bio->bi_private;
bio              8160 fs/btrfs/inode.c 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
bio              8169 fs/btrfs/inode.c 	if (bio->bi_status)
bio              8174 fs/btrfs/inode.c 	ASSERT(bio->bi_vcnt == 1);
bio              8175 fs/btrfs/inode.c 	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
bio              8180 fs/btrfs/inode.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              8181 fs/btrfs/inode.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio              8199 fs/btrfs/inode.c 	bio_put(bio);
bio              8225 fs/btrfs/inode.c 	io_bio->bio.bi_iter = io_bio->iter;
bio              8227 fs/btrfs/inode.c 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
bio              8244 fs/btrfs/inode.c 		status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
bio              8291 fs/btrfs/inode.c static void btrfs_endio_direct_read(struct bio *bio)
bio              8293 fs/btrfs/inode.c 	struct btrfs_dio_private *dip = bio->bi_private;
bio              8295 fs/btrfs/inode.c 	struct bio *dio_bio;
bio              8296 fs/btrfs/inode.c 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
bio              8297 fs/btrfs/inode.c 	blk_status_t err = bio->bi_status;
bio              8311 fs/btrfs/inode.c 	bio_put(bio);
bio              8357 fs/btrfs/inode.c static void btrfs_endio_direct_write(struct bio *bio)
bio              8359 fs/btrfs/inode.c 	struct btrfs_dio_private *dip = bio->bi_private;
bio              8360 fs/btrfs/inode.c 	struct bio *dio_bio = dip->dio_bio;
bio              8363 fs/btrfs/inode.c 				     dip->bytes, !bio->bi_status);
bio              8367 fs/btrfs/inode.c 	dio_bio->bi_status = bio->bi_status;
bio              8369 fs/btrfs/inode.c 	bio_put(bio);
bio              8373 fs/btrfs/inode.c 				    struct bio *bio, u64 offset)
bio              8377 fs/btrfs/inode.c 	ret = btrfs_csum_one_bio(inode, bio, offset, 1);
bio              8382 fs/btrfs/inode.c static void btrfs_end_dio_bio(struct bio *bio)
bio              8384 fs/btrfs/inode.c 	struct btrfs_dio_private *dip = bio->bi_private;
bio              8385 fs/btrfs/inode.c 	blk_status_t err = bio->bi_status;
bio              8390 fs/btrfs/inode.c 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
bio              8391 fs/btrfs/inode.c 			   bio->bi_opf,
bio              8392 fs/btrfs/inode.c 			   (unsigned long long)bio->bi_iter.bi_sector,
bio              8393 fs/btrfs/inode.c 			   bio->bi_iter.bi_size, err);
bio              8396 fs/btrfs/inode.c 		err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
bio              8419 fs/btrfs/inode.c 	bio_put(bio);
bio              8424 fs/btrfs/inode.c 						 struct bio *bio,
bio              8427 fs/btrfs/inode.c 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
bio              8444 fs/btrfs/inode.c 	if (bio == dip->orig_bio)
bio              8455 fs/btrfs/inode.c static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
bio              8459 fs/btrfs/inode.c 	struct btrfs_dio_private *dip = bio->bi_private;
bio              8460 fs/btrfs/inode.c 	bool write = bio_op(bio) == REQ_OP_WRITE;
bio              8468 fs/btrfs/inode.c 		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
bio              8477 fs/btrfs/inode.c 		ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
bio              8486 fs/btrfs/inode.c 		ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
bio              8490 fs/btrfs/inode.c 		ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
bio              8496 fs/btrfs/inode.c 	ret = btrfs_map_bio(fs_info, bio, 0, 0);
bio              8505 fs/btrfs/inode.c 	struct bio *bio;
bio              8506 fs/btrfs/inode.c 	struct bio *orig_bio = dip->orig_bio;
bio              8524 fs/btrfs/inode.c 		bio = orig_bio;
bio              8545 fs/btrfs/inode.c 		bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
bio              8547 fs/btrfs/inode.c 		bio->bi_private = dip;
bio              8548 fs/btrfs/inode.c 		bio->bi_end_io = btrfs_end_dio_bio;
bio              8549 fs/btrfs/inode.c 		btrfs_io_bio(bio)->logical = file_offset;
bio              8564 fs/btrfs/inode.c 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
bio              8567 fs/btrfs/inode.c 			bio_put(bio);
bio              8583 fs/btrfs/inode.c 	status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
bio              8587 fs/btrfs/inode.c 	bio_put(bio);
bio              8603 fs/btrfs/inode.c static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
bio              8607 fs/btrfs/inode.c 	struct bio *bio = NULL;
bio              8612 fs/btrfs/inode.c 	bio = btrfs_bio_clone(dio_bio);
bio              8625 fs/btrfs/inode.c 	bio->bi_private = dip;
bio              8626 fs/btrfs/inode.c 	dip->orig_bio = bio;
bio              8629 fs/btrfs/inode.c 	io_bio = btrfs_io_bio(bio);
bio              8633 fs/btrfs/inode.c 		bio->bi_end_io = btrfs_endio_direct_write;
bio              8635 fs/btrfs/inode.c 		bio->bi_end_io = btrfs_endio_direct_read;
bio              8670 fs/btrfs/inode.c 	if (bio && dip) {
bio              8671 fs/btrfs/inode.c 		bio_io_error(bio);
bio              8678 fs/btrfs/inode.c 		bio = NULL;
bio              8696 fs/btrfs/inode.c 	if (bio)
bio              8697 fs/btrfs/inode.c 		bio_put(bio);
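
The fs/btrfs/inode.c hits above share one completion shape: the filesystem clones the caller's bio, stashes its own state in ->bi_private, and on completion copies ->bi_status back to the original before dropping the clone. A minimal sketch of that shape, assuming <linux/bio.h>; my_dio and my_clone_endio are illustrative names, not btrfs symbols:

        struct my_dio {
                struct bio *orig_bio;   /* bio the dio layer handed in */
        };

        static void my_clone_endio(struct bio *bio)
        {
                struct my_dio *dip = bio->bi_private;
                struct bio *orig = dip->orig_bio;

                orig->bi_status = bio->bi_status;       /* propagate any error */
                bio_put(bio);                           /* drop the clone */
                kfree(dip);
                bio_endio(orig);                        /* complete the caller's bio */
        }
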
bio               330 fs/btrfs/lzo.c 	struct bio *orig_bio = cb->orig_bio;
bio               181 fs/btrfs/raid56.c static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
bio               857 fs/btrfs/raid56.c static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
bio               859 fs/btrfs/raid56.c 	struct bio *next;
bio               876 fs/btrfs/raid56.c 	struct bio *cur = bio_list_get(&rbio->bio_list);
bio               877 fs/btrfs/raid56.c 	struct bio *extra;
bio               903 fs/btrfs/raid56.c static void raid_write_end_io(struct bio *bio)
bio               905 fs/btrfs/raid56.c 	struct btrfs_raid_bio *rbio = bio->bi_private;
bio               906 fs/btrfs/raid56.c 	blk_status_t err = bio->bi_status;
bio               910 fs/btrfs/raid56.c 		fail_bio_stripe(rbio, bio);
bio               912 fs/btrfs/raid56.c 	bio_put(bio);
bio              1089 fs/btrfs/raid56.c 	struct bio *last = bio_list->tail;
bio              1092 fs/btrfs/raid56.c 	struct bio *bio;
bio              1123 fs/btrfs/raid56.c 	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
bio              1124 fs/btrfs/raid56.c 	bio->bi_iter.bi_size = 0;
bio              1125 fs/btrfs/raid56.c 	bio_set_dev(bio, stripe->dev->bdev);
bio              1126 fs/btrfs/raid56.c 	bio->bi_iter.bi_sector = disk_start >> 9;
bio              1128 fs/btrfs/raid56.c 	bio_add_page(bio, page, PAGE_SIZE, 0);
bio              1129 fs/btrfs/raid56.c 	bio_list_add(bio_list, bio);
bio              1160 fs/btrfs/raid56.c 	struct bio *bio;
bio              1166 fs/btrfs/raid56.c 	bio_list_for_each(bio, &rbio->bio_list) {
bio              1171 fs/btrfs/raid56.c 		start = (u64)bio->bi_iter.bi_sector << 9;
bio              1175 fs/btrfs/raid56.c 		if (bio_flagged(bio, BIO_CLONED))
bio              1176 fs/btrfs/raid56.c 			bio->bi_iter = btrfs_io_bio(bio)->iter;
bio              1178 fs/btrfs/raid56.c 		bio_for_each_segment(bvec, bio, iter) {
bio              1204 fs/btrfs/raid56.c 	struct bio *bio;
bio              1336 fs/btrfs/raid56.c 		bio = bio_list_pop(&bio_list);
bio              1337 fs/btrfs/raid56.c 		if (!bio)
bio              1340 fs/btrfs/raid56.c 		bio->bi_private = rbio;
bio              1341 fs/btrfs/raid56.c 		bio->bi_end_io = raid_write_end_io;
bio              1342 fs/btrfs/raid56.c 		bio->bi_opf = REQ_OP_WRITE;
bio              1344 fs/btrfs/raid56.c 		submit_bio(bio);
bio              1351 fs/btrfs/raid56.c 	while ((bio = bio_list_pop(&bio_list)))
bio              1352 fs/btrfs/raid56.c 		bio_put(bio);
bio              1361 fs/btrfs/raid56.c 			   struct bio *bio)
bio              1363 fs/btrfs/raid56.c 	u64 physical = bio->bi_iter.bi_sector;
bio              1376 fs/btrfs/raid56.c 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
bio              1377 fs/btrfs/raid56.c 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
bio              1390 fs/btrfs/raid56.c 				   struct bio *bio)
bio              1392 fs/btrfs/raid56.c 	u64 logical = bio->bi_iter.bi_sector;
bio              1444 fs/btrfs/raid56.c 			   struct bio *bio)
bio              1446 fs/btrfs/raid56.c 	int failed = find_bio_stripe(rbio, bio);
bio              1458 fs/btrfs/raid56.c static void set_bio_pages_uptodate(struct bio *bio)
bio              1463 fs/btrfs/raid56.c 	ASSERT(!bio_flagged(bio, BIO_CLONED));
bio              1465 fs/btrfs/raid56.c 	bio_for_each_segment_all(bvec, bio, iter_all)
bio              1477 fs/btrfs/raid56.c static void raid_rmw_end_io(struct bio *bio)
bio              1479 fs/btrfs/raid56.c 	struct btrfs_raid_bio *rbio = bio->bi_private;
bio              1481 fs/btrfs/raid56.c 	if (bio->bi_status)
bio              1482 fs/btrfs/raid56.c 		fail_bio_stripe(rbio, bio);
bio              1484 fs/btrfs/raid56.c 		set_bio_pages_uptodate(bio);
bio              1486 fs/btrfs/raid56.c 	bio_put(bio);
bio              1518 fs/btrfs/raid56.c 	struct bio *bio;
bio              1578 fs/btrfs/raid56.c 		bio = bio_list_pop(&bio_list);
bio              1579 fs/btrfs/raid56.c 		if (!bio)
bio              1582 fs/btrfs/raid56.c 		bio->bi_private = rbio;
bio              1583 fs/btrfs/raid56.c 		bio->bi_end_io = raid_rmw_end_io;
bio              1584 fs/btrfs/raid56.c 		bio->bi_opf = REQ_OP_READ;
bio              1586 fs/btrfs/raid56.c 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
bio              1588 fs/btrfs/raid56.c 		submit_bio(bio);
bio              1596 fs/btrfs/raid56.c 	while ((bio = bio_list_pop(&bio_list)))
bio              1597 fs/btrfs/raid56.c 		bio_put(bio);
bio              1757 fs/btrfs/raid56.c int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
bio              1770 fs/btrfs/raid56.c 	bio_list_add(&rbio->bio_list, bio);
bio              1771 fs/btrfs/raid56.c 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
bio              2025 fs/btrfs/raid56.c static void raid_recover_end_io(struct bio *bio)
bio              2027 fs/btrfs/raid56.c 	struct btrfs_raid_bio *rbio = bio->bi_private;
bio              2033 fs/btrfs/raid56.c 	if (bio->bi_status)
bio              2034 fs/btrfs/raid56.c 		fail_bio_stripe(rbio, bio);
bio              2036 fs/btrfs/raid56.c 		set_bio_pages_uptodate(bio);
bio              2037 fs/btrfs/raid56.c 	bio_put(bio);
bio              2063 fs/btrfs/raid56.c 	struct bio *bio;
bio              2124 fs/btrfs/raid56.c 		bio = bio_list_pop(&bio_list);
bio              2125 fs/btrfs/raid56.c 		if (!bio)
bio              2128 fs/btrfs/raid56.c 		bio->bi_private = rbio;
bio              2129 fs/btrfs/raid56.c 		bio->bi_end_io = raid_recover_end_io;
bio              2130 fs/btrfs/raid56.c 		bio->bi_opf = REQ_OP_READ;
bio              2132 fs/btrfs/raid56.c 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
bio              2134 fs/btrfs/raid56.c 		submit_bio(bio);
bio              2144 fs/btrfs/raid56.c 	while ((bio = bio_list_pop(&bio_list)))
bio              2145 fs/btrfs/raid56.c 		bio_put(bio);
bio              2156 fs/btrfs/raid56.c int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
bio              2165 fs/btrfs/raid56.c 		btrfs_io_bio(bio)->mirror_num = mirror_num;
bio              2176 fs/btrfs/raid56.c 	bio_list_add(&rbio->bio_list, bio);
bio              2177 fs/btrfs/raid56.c 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
bio              2179 fs/btrfs/raid56.c 	rbio->faila = find_logical_bio_stripe(rbio, bio);
bio              2183 fs/btrfs/raid56.c 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
bio              2184 fs/btrfs/raid56.c 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
bio              2262 fs/btrfs/raid56.c raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio              2273 fs/btrfs/raid56.c 	bio_list_add(&rbio->bio_list, bio);
bio              2278 fs/btrfs/raid56.c 	ASSERT(!bio->bi_iter.bi_size);
bio              2363 fs/btrfs/raid56.c 	struct bio *bio;
bio              2497 fs/btrfs/raid56.c 		bio = bio_list_pop(&bio_list);
bio              2498 fs/btrfs/raid56.c 		if (!bio)
bio              2501 fs/btrfs/raid56.c 		bio->bi_private = rbio;
bio              2502 fs/btrfs/raid56.c 		bio->bi_end_io = raid_write_end_io;
bio              2503 fs/btrfs/raid56.c 		bio->bi_opf = REQ_OP_WRITE;
bio              2505 fs/btrfs/raid56.c 		submit_bio(bio);
bio              2512 fs/btrfs/raid56.c 	while ((bio = bio_list_pop(&bio_list)))
bio              2513 fs/btrfs/raid56.c 		bio_put(bio);
bio              2592 fs/btrfs/raid56.c static void raid56_parity_scrub_end_io(struct bio *bio)
bio              2594 fs/btrfs/raid56.c 	struct btrfs_raid_bio *rbio = bio->bi_private;
bio              2596 fs/btrfs/raid56.c 	if (bio->bi_status)
bio              2597 fs/btrfs/raid56.c 		fail_bio_stripe(rbio, bio);
bio              2599 fs/btrfs/raid56.c 		set_bio_pages_uptodate(bio);
bio              2601 fs/btrfs/raid56.c 	bio_put(bio);
bio              2621 fs/btrfs/raid56.c 	struct bio *bio;
bio              2679 fs/btrfs/raid56.c 		bio = bio_list_pop(&bio_list);
bio              2680 fs/btrfs/raid56.c 		if (!bio)
bio              2683 fs/btrfs/raid56.c 		bio->bi_private = rbio;
bio              2684 fs/btrfs/raid56.c 		bio->bi_end_io = raid56_parity_scrub_end_io;
bio              2685 fs/btrfs/raid56.c 		bio->bi_opf = REQ_OP_READ;
bio              2687 fs/btrfs/raid56.c 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
bio              2689 fs/btrfs/raid56.c 		submit_bio(bio);
bio              2697 fs/btrfs/raid56.c 	while ((bio = bio_list_pop(&bio_list)))
bio              2698 fs/btrfs/raid56.c 		bio_put(bio);
bio              2723 fs/btrfs/raid56.c raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio              2733 fs/btrfs/raid56.c 	bio_list_add(&rbio->bio_list, bio);
bio              2738 fs/btrfs/raid56.c 	ASSERT(!bio->bi_iter.bi_size);
bio              2740 fs/btrfs/raid56.c 	rbio->faila = find_logical_bio_stripe(rbio, bio);
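
fs/btrfs/raid56.c repeats one submit/cleanup idiom above: bios are gathered on a struct bio_list, popped and submitted one at a time, and any bios still on the list after a failure are dropped with bio_put(). A hedged sketch of the submit half; the end_io argument stands in for raid_write_end_io, raid_rmw_end_io, or raid_recover_end_io:

        static void submit_rbio_list(struct btrfs_raid_bio *rbio,
                                     struct bio_list *bio_list, unsigned int opf,
                                     bio_end_io_t *end_io)
        {
                struct bio *bio;

                while ((bio = bio_list_pop(bio_list))) {
                        bio->bi_private = rbio;
                        bio->bi_end_io = end_io;
                        bio->bi_opf = opf;      /* REQ_OP_READ or REQ_OP_WRITE */
                        submit_bio(bio);
                }
                /*
                 * The error paths in the listing run the same pop loop but
                 * call bio_put() on each bio instead of submitting it.
                 */
        }
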
bio                33 fs/btrfs/raid56.h int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
bio                36 fs/btrfs/raid56.h int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
bio                43 fs/btrfs/raid56.h raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio                50 fs/btrfs/raid56.h raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio                87 fs/btrfs/scrub.c 	struct bio		*bio;
bio               241 fs/btrfs/scrub.c static void scrub_bio_end_io(struct bio *bio);
bio               252 fs/btrfs/scrub.c static void scrub_wr_bio_end_io(struct bio *bio);
bio               553 fs/btrfs/scrub.c 		bio_put(sbio->bio);
bio              1391 fs/btrfs/scrub.c static void scrub_bio_wait_endio(struct bio *bio)
bio              1393 fs/btrfs/scrub.c 	complete(bio->bi_private);
bio              1397 fs/btrfs/scrub.c 					struct bio *bio,
bio              1404 fs/btrfs/scrub.c 	bio->bi_iter.bi_sector = page->logical >> 9;
bio              1405 fs/btrfs/scrub.c 	bio->bi_private = &done;
bio              1406 fs/btrfs/scrub.c 	bio->bi_end_io = scrub_bio_wait_endio;
bio              1409 fs/btrfs/scrub.c 	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
bio              1416 fs/btrfs/scrub.c 	return blk_status_to_errno(bio->bi_status);
bio              1423 fs/btrfs/scrub.c 	struct bio *bio;
bio              1431 fs/btrfs/scrub.c 	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
bio              1432 fs/btrfs/scrub.c 	bio_set_dev(bio, first_page->dev->bdev);
bio              1438 fs/btrfs/scrub.c 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
bio              1441 fs/btrfs/scrub.c 	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
bio              1442 fs/btrfs/scrub.c 		bio_put(bio);
bio              1446 fs/btrfs/scrub.c 	bio_put(bio);
bio              1478 fs/btrfs/scrub.c 		struct bio *bio;
bio              1488 fs/btrfs/scrub.c 		bio = btrfs_io_bio_alloc(1);
bio              1489 fs/btrfs/scrub.c 		bio_set_dev(bio, page->dev->bdev);
bio              1491 fs/btrfs/scrub.c 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
bio              1492 fs/btrfs/scrub.c 		bio->bi_iter.bi_sector = page->physical >> 9;
bio              1493 fs/btrfs/scrub.c 		bio->bi_opf = REQ_OP_READ;
bio              1495 fs/btrfs/scrub.c 		if (btrfsic_submit_bio_wait(bio)) {
bio              1500 fs/btrfs/scrub.c 		bio_put(bio);
bio              1560 fs/btrfs/scrub.c 		struct bio *bio;
bio              1569 fs/btrfs/scrub.c 		bio = btrfs_io_bio_alloc(1);
bio              1570 fs/btrfs/scrub.c 		bio_set_dev(bio, page_bad->dev->bdev);
bio              1571 fs/btrfs/scrub.c 		bio->bi_iter.bi_sector = page_bad->physical >> 9;
bio              1572 fs/btrfs/scrub.c 		bio->bi_opf = REQ_OP_WRITE;
bio              1574 fs/btrfs/scrub.c 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
bio              1576 fs/btrfs/scrub.c 			bio_put(bio);
bio              1580 fs/btrfs/scrub.c 		if (btrfsic_submit_bio_wait(bio)) {
bio              1584 fs/btrfs/scrub.c 			bio_put(bio);
bio              1587 fs/btrfs/scrub.c 		bio_put(bio);
bio              1650 fs/btrfs/scrub.c 		struct bio *bio;
bio              1655 fs/btrfs/scrub.c 		bio = sbio->bio;
bio              1656 fs/btrfs/scrub.c 		if (!bio) {
bio              1657 fs/btrfs/scrub.c 			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
bio              1658 fs/btrfs/scrub.c 			sbio->bio = bio;
bio              1661 fs/btrfs/scrub.c 		bio->bi_private = sbio;
bio              1662 fs/btrfs/scrub.c 		bio->bi_end_io = scrub_wr_bio_end_io;
bio              1663 fs/btrfs/scrub.c 		bio_set_dev(bio, sbio->dev->bdev);
bio              1664 fs/btrfs/scrub.c 		bio->bi_iter.bi_sector = sbio->physical >> 9;
bio              1665 fs/btrfs/scrub.c 		bio->bi_opf = REQ_OP_WRITE;
bio              1675 fs/btrfs/scrub.c 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
bio              1678 fs/btrfs/scrub.c 			bio_put(sbio->bio);
bio              1679 fs/btrfs/scrub.c 			sbio->bio = NULL;
bio              1706 fs/btrfs/scrub.c 	WARN_ON(!sbio->bio->bi_disk);
bio              1712 fs/btrfs/scrub.c 	btrfsic_submit_bio(sbio->bio);
bio              1715 fs/btrfs/scrub.c static void scrub_wr_bio_end_io(struct bio *bio)
bio              1717 fs/btrfs/scrub.c 	struct scrub_bio *sbio = bio->bi_private;
bio              1720 fs/btrfs/scrub.c 	sbio->status = bio->bi_status;
bio              1721 fs/btrfs/scrub.c 	sbio->bio = bio;
bio              1749 fs/btrfs/scrub.c 	bio_put(sbio->bio);
bio              2030 fs/btrfs/scrub.c 	btrfsic_submit_bio(sbio->bio);
bio              2059 fs/btrfs/scrub.c 		struct bio *bio;
bio              2064 fs/btrfs/scrub.c 		bio = sbio->bio;
bio              2065 fs/btrfs/scrub.c 		if (!bio) {
bio              2066 fs/btrfs/scrub.c 			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
bio              2067 fs/btrfs/scrub.c 			sbio->bio = bio;
bio              2070 fs/btrfs/scrub.c 		bio->bi_private = sbio;
bio              2071 fs/btrfs/scrub.c 		bio->bi_end_io = scrub_bio_end_io;
bio              2072 fs/btrfs/scrub.c 		bio_set_dev(bio, sbio->dev->bdev);
bio              2073 fs/btrfs/scrub.c 		bio->bi_iter.bi_sector = sbio->physical >> 9;
bio              2074 fs/btrfs/scrub.c 		bio->bi_opf = REQ_OP_READ;
bio              2086 fs/btrfs/scrub.c 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
bio              2089 fs/btrfs/scrub.c 			bio_put(sbio->bio);
bio              2090 fs/btrfs/scrub.c 			sbio->bio = NULL;
bio              2106 fs/btrfs/scrub.c static void scrub_missing_raid56_end_io(struct bio *bio)
bio              2108 fs/btrfs/scrub.c 	struct scrub_block *sblock = bio->bi_private;
bio              2111 fs/btrfs/scrub.c 	if (bio->bi_status)
bio              2114 fs/btrfs/scrub.c 	bio_put(bio);
bio              2168 fs/btrfs/scrub.c 	struct bio *bio;
bio              2190 fs/btrfs/scrub.c 	bio = btrfs_io_bio_alloc(0);
bio              2191 fs/btrfs/scrub.c 	bio->bi_iter.bi_sector = logical >> 9;
bio              2192 fs/btrfs/scrub.c 	bio->bi_private = sblock;
bio              2193 fs/btrfs/scrub.c 	bio->bi_end_io = scrub_missing_raid56_end_io;
bio              2195 fs/btrfs/scrub.c 	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
bio              2212 fs/btrfs/scrub.c 	bio_put(bio);
bio              2311 fs/btrfs/scrub.c static void scrub_bio_end_io(struct bio *bio)
bio              2313 fs/btrfs/scrub.c 	struct scrub_bio *sbio = bio->bi_private;
bio              2316 fs/btrfs/scrub.c 	sbio->status = bio->bi_status;
bio              2317 fs/btrfs/scrub.c 	sbio->bio = bio;
bio              2348 fs/btrfs/scrub.c 	bio_put(sbio->bio);
bio              2349 fs/btrfs/scrub.c 	sbio->bio = NULL;
bio              2732 fs/btrfs/scrub.c static void scrub_parity_bio_endio(struct bio *bio)
bio              2734 fs/btrfs/scrub.c 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
bio              2737 fs/btrfs/scrub.c 	if (bio->bi_status)
bio              2741 fs/btrfs/scrub.c 	bio_put(bio);
bio              2752 fs/btrfs/scrub.c 	struct bio *bio;
bio              2770 fs/btrfs/scrub.c 	bio = btrfs_io_bio_alloc(0);
bio              2771 fs/btrfs/scrub.c 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
bio              2772 fs/btrfs/scrub.c 	bio->bi_private = sparity;
bio              2773 fs/btrfs/scrub.c 	bio->bi_end_io = scrub_parity_bio_endio;
bio              2775 fs/btrfs/scrub.c 	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
bio              2787 fs/btrfs/scrub.c 	bio_put(bio);
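
scrub_bio_wait_endio() above open-codes a synchronous bio: ->bi_private points at an on-stack completion, the endio handler fires it, and the waiter then reads ->bi_status. A sketch of the same idiom against a plain block device; note submit_bio_wait() packages exactly this, and scrub only open-codes it because its bio is routed through raid56_parity_recover() rather than submit_bio():

        static void one_bio_endio(struct bio *bio)
        {
                complete(bio->bi_private);
        }

        static int read_page_sync(struct block_device *bdev, struct page *page,
                                  sector_t sector)
        {
                DECLARE_COMPLETION_ONSTACK(done);
                struct bio *bio = bio_alloc(GFP_NOFS, 1);
                int err;

                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = sector;
                bio->bi_opf = REQ_OP_READ;
                bio_add_page(bio, page, PAGE_SIZE, 0); /* cannot fail: fresh bio */
                bio->bi_private = &done;
                bio->bi_end_io = one_bio_endio;
                submit_bio(bio);
                wait_for_completion_io(&done);

                err = blk_status_to_errno(bio->bi_status);
                bio_put(bio);
                return err;
        }
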
bio               505 fs/btrfs/volumes.c 			struct bio *head, struct bio *tail)
bio               508 fs/btrfs/volumes.c 	struct bio *old_head;
bio               532 fs/btrfs/volumes.c 	struct bio *pending;
bio               535 fs/btrfs/volumes.c 	struct bio *tail;
bio               536 fs/btrfs/volumes.c 	struct bio *cur;
bio              6399 fs/btrfs/volumes.c static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
bio              6401 fs/btrfs/volumes.c 	bio->bi_private = bbio->private;
bio              6402 fs/btrfs/volumes.c 	bio->bi_end_io = bbio->end_io;
bio              6403 fs/btrfs/volumes.c 	bio_endio(bio);
bio              6408 fs/btrfs/volumes.c static void btrfs_end_bio(struct bio *bio)
bio              6410 fs/btrfs/volumes.c 	struct btrfs_bio *bbio = bio->bi_private;
bio              6413 fs/btrfs/volumes.c 	if (bio->bi_status) {
bio              6415 fs/btrfs/volumes.c 		if (bio->bi_status == BLK_STS_IOERR ||
bio              6416 fs/btrfs/volumes.c 		    bio->bi_status == BLK_STS_TARGET) {
bio              6418 fs/btrfs/volumes.c 				btrfs_io_bio(bio)->stripe_index;
bio              6424 fs/btrfs/volumes.c 				if (bio_op(bio) == REQ_OP_WRITE)
bio              6427 fs/btrfs/volumes.c 				else if (!(bio->bi_opf & REQ_RAHEAD))
bio              6430 fs/btrfs/volumes.c 				if (bio->bi_opf & REQ_PREFLUSH)
bio              6437 fs/btrfs/volumes.c 	if (bio == bbio->orig_bio)
bio              6444 fs/btrfs/volumes.c 			bio_put(bio);
bio              6445 fs/btrfs/volumes.c 			bio = bbio->orig_bio;
bio              6448 fs/btrfs/volumes.c 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
bio              6453 fs/btrfs/volumes.c 			bio->bi_status = BLK_STS_IOERR;
bio              6459 fs/btrfs/volumes.c 			bio->bi_status = BLK_STS_OK;
bio              6462 fs/btrfs/volumes.c 		btrfs_end_bbio(bbio, bio);
bio              6464 fs/btrfs/volumes.c 		bio_put(bio);
bio              6476 fs/btrfs/volumes.c 					struct bio *bio)
bio              6483 fs/btrfs/volumes.c 	if (bio_op(bio) == REQ_OP_READ) {
bio              6484 fs/btrfs/volumes.c 		btrfsic_submit_bio(bio);
bio              6488 fs/btrfs/volumes.c 	WARN_ON(bio->bi_next);
bio              6489 fs/btrfs/volumes.c 	bio->bi_next = NULL;
bio              6492 fs/btrfs/volumes.c 	if (op_is_sync(bio->bi_opf))
bio              6498 fs/btrfs/volumes.c 		pending_bios->tail->bi_next = bio;
bio              6500 fs/btrfs/volumes.c 	pending_bios->tail = bio;
bio              6502 fs/btrfs/volumes.c 		pending_bios->head = bio;
bio              6512 fs/btrfs/volumes.c static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
bio              6518 fs/btrfs/volumes.c 	bio->bi_private = bbio;
bio              6519 fs/btrfs/volumes.c 	btrfs_io_bio(bio)->stripe_index = dev_nr;
bio              6520 fs/btrfs/volumes.c 	bio->bi_end_io = btrfs_end_bio;
bio              6521 fs/btrfs/volumes.c 	bio->bi_iter.bi_sector = physical >> 9;
bio              6524 fs/btrfs/volumes.c 		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
bio              6526 fs/btrfs/volumes.c 		bio->bi_iter.bi_size);
bio              6527 fs/btrfs/volumes.c 	bio_set_dev(bio, dev->bdev);
bio              6532 fs/btrfs/volumes.c 		btrfs_schedule_bio(dev, bio);
bio              6534 fs/btrfs/volumes.c 		btrfsic_submit_bio(bio);
bio              6537 fs/btrfs/volumes.c static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
bio              6542 fs/btrfs/volumes.c 		WARN_ON(bio != bbio->orig_bio);
bio              6544 fs/btrfs/volumes.c 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
bio              6545 fs/btrfs/volumes.c 		bio->bi_iter.bi_sector = logical >> 9;
bio              6547 fs/btrfs/volumes.c 			bio->bi_status = BLK_STS_IOERR;
bio              6549 fs/btrfs/volumes.c 			bio->bi_status = BLK_STS_OK;
bio              6550 fs/btrfs/volumes.c 		btrfs_end_bbio(bbio, bio);
bio              6554 fs/btrfs/volumes.c blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio              6558 fs/btrfs/volumes.c 	struct bio *first_bio = bio;
bio              6559 fs/btrfs/volumes.c 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
bio              6567 fs/btrfs/volumes.c 	length = bio->bi_iter.bi_size;
bio              6571 fs/btrfs/volumes.c 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
bio              6586 fs/btrfs/volumes.c 	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
bio              6589 fs/btrfs/volumes.c 		if (bio_op(bio) == REQ_OP_WRITE) {
bio              6590 fs/btrfs/volumes.c 			ret = raid56_parity_write(fs_info, bio, bbio,
bio              6593 fs/btrfs/volumes.c 			ret = raid56_parity_recover(fs_info, bio, bbio,
bio              6619 fs/btrfs/volumes.c 			bio = btrfs_bio_clone(first_bio);
bio              6621 fs/btrfs/volumes.c 			bio = first_bio;
bio              6623 fs/btrfs/volumes.c 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
bio                22 fs/btrfs/volumes.h 	struct bio *head;
bio                23 fs/btrfs/volumes.h 	struct bio *tail;
bio               128 fs/btrfs/volumes.h 	struct bio *flush_bio;
bio               304 fs/btrfs/volumes.h 	struct bio bio;
bio               307 fs/btrfs/volumes.h static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
bio               309 fs/btrfs/volumes.h 	return container_of(bio, struct btrfs_io_bio, bio);
bio               332 fs/btrfs/volumes.h 	struct bio *orig_bio;
bio               406 fs/btrfs/volumes.h static inline enum btrfs_map_op btrfs_op(struct bio *bio)
bio               408 fs/btrfs/volumes.h 	switch (bio_op(bio)) {
bio               437 fs/btrfs/volumes.h blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio               244 fs/btrfs/zlib.c 	struct bio *orig_bio = cb->orig_bio;
bio               556 fs/btrfs/zstd.c 	struct bio *orig_bio = cb->orig_bio;
bio              2982 fs/buffer.c    static void end_bio_bh_io_sync(struct bio *bio)
bio              2984 fs/buffer.c    	struct buffer_head *bh = bio->bi_private;
bio              2986 fs/buffer.c    	if (unlikely(bio_flagged(bio, BIO_QUIET)))
bio              2989 fs/buffer.c    	bh->b_end_io(bh, !bio->bi_status);
bio              2990 fs/buffer.c    	bio_put(bio);
bio              3005 fs/buffer.c    void guard_bio_eod(struct bio *bio)
bio              3011 fs/buffer.c    	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
bio              3015 fs/buffer.c    		maxsector = get_capacity(bio->bi_disk);
bio              3026 fs/buffer.c    	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
bio              3029 fs/buffer.c    	maxsector -= bio->bi_iter.bi_sector;
bio              3030 fs/buffer.c    	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
bio              3033 fs/buffer.c    	bio_truncate(bio, maxsector << 9);
bio              3039 fs/buffer.c    	struct bio *bio;
bio              3057 fs/buffer.c    	bio = bio_alloc(GFP_NOIO, 1);
bio              3059 fs/buffer.c    	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio              3060 fs/buffer.c    	bio_set_dev(bio, bh->b_bdev);
bio              3061 fs/buffer.c    	bio->bi_write_hint = write_hint;
bio              3063 fs/buffer.c    	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
bio              3064 fs/buffer.c    	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
bio              3066 fs/buffer.c    	bio->bi_end_io = end_bio_bh_io_sync;
bio              3067 fs/buffer.c    	bio->bi_private = bh;
bio              3073 fs/buffer.c    	bio_set_op_attrs(bio, op, op_flags);
bio              3076 fs/buffer.c    	guard_bio_eod(bio);
bio              3079 fs/buffer.c    		wbc_init_bio(wbc, bio);
bio              3083 fs/buffer.c    	submit_bio(bio);
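
end_bio_bh_io_sync() above is the whole bridge from the bio world back to buffer_heads: a one-segment bio carries the bh's page, and completion translates ->bi_status into the bh's end_io(uptodate) convention. A stripped-down sketch (the BIO_QUIET propagation is omitted; bh_bio_endio is an illustrative name):

        static void bh_bio_endio(struct bio *bio)
        {
                struct buffer_head *bh = bio->bi_private;

                bh->b_end_io(bh, !bio->bi_status);      /* uptodate iff no error */
                bio_put(bio);
        }
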
bio                29 fs/crypto/bio.c static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bio                34 fs/crypto/bio.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bio                47 fs/crypto/bio.c void fscrypt_decrypt_bio(struct bio *bio)
bio                49 fs/crypto/bio.c 	__fscrypt_decrypt_bio(bio, false);
bio                56 fs/crypto/bio.c 	struct bio *bio = ctx->bio;
bio                58 fs/crypto/bio.c 	__fscrypt_decrypt_bio(bio, true);
bio                60 fs/crypto/bio.c 	bio_put(bio);
bio                63 fs/crypto/bio.c void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
bio                66 fs/crypto/bio.c 	ctx->bio = bio;
bio                77 fs/crypto/bio.c 	struct bio *bio;
bio                91 fs/crypto/bio.c 		bio = bio_alloc(GFP_NOWAIT, 1);
bio                92 fs/crypto/bio.c 		if (!bio) {
bio                96 fs/crypto/bio.c 		bio_set_dev(bio, inode->i_sb->s_bdev);
bio                97 fs/crypto/bio.c 		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio                98 fs/crypto/bio.c 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio                99 fs/crypto/bio.c 		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
bio               102 fs/crypto/bio.c 			bio_put(bio);
bio               106 fs/crypto/bio.c 		err = submit_bio_wait(bio);
bio               107 fs/crypto/bio.c 		if (err == 0 && bio->bi_status)
bio               109 fs/crypto/bio.c 		bio_put(bio);
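
The fs/crypto loop above is the standard synchronous single-block write. A self-contained sketch under the same assumptions (one page of prepared data, sleepable context); submit_bio_wait() already returns an errno derived from ->bi_status:

        static int write_block_sync(struct block_device *bdev, struct page *page,
                                    sector_t sector, unsigned int len)
        {
                struct bio *bio = bio_alloc(GFP_NOIO, 1);
                int err;

                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = sector;
                bio->bi_opf = REQ_OP_WRITE;
                if (bio_add_page(bio, page, len, 0) != len) {
                        bio_put(bio);
                        return -EIO;
                }
                err = submit_bio_wait(bio);     /* sleeps until completion */
                bio_put(bio);
                return err;
        }
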
bio                69 fs/direct-io.c 	struct bio *bio;		/* bio under assembly */
bio               136 fs/direct-io.c 	struct bio *bio_list;		/* singly linked via bi_private */
bio               344 fs/direct-io.c static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
bio               349 fs/direct-io.c static void dio_bio_end_aio(struct bio *bio)
bio               351 fs/direct-io.c 	struct dio *dio = bio->bi_private;
bio               357 fs/direct-io.c 	dio_bio_complete(dio, bio);
bio               395 fs/direct-io.c static void dio_bio_end_io(struct bio *bio)
bio               397 fs/direct-io.c 	struct dio *dio = bio->bi_private;
bio               401 fs/direct-io.c 	bio->bi_private = dio->bio_list;
bio               402 fs/direct-io.c 	dio->bio_list = bio;
bio               416 fs/direct-io.c void dio_end_io(struct bio *bio)
bio               418 fs/direct-io.c 	struct dio *dio = bio->bi_private;
bio               421 fs/direct-io.c 		dio_bio_end_aio(bio);
bio               423 fs/direct-io.c 		dio_bio_end_io(bio);
bio               432 fs/direct-io.c 	struct bio *bio;
bio               438 fs/direct-io.c 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio               440 fs/direct-io.c 	bio_set_dev(bio, bdev);
bio               441 fs/direct-io.c 	bio->bi_iter.bi_sector = first_sector;
bio               442 fs/direct-io.c 	bio_set_op_attrs(bio, dio->op, dio->op_flags);
bio               444 fs/direct-io.c 		bio->bi_end_io = dio_bio_end_aio;
bio               446 fs/direct-io.c 		bio->bi_end_io = dio_bio_end_io;
bio               448 fs/direct-io.c 	bio->bi_write_hint = dio->iocb->ki_hint;
bio               450 fs/direct-io.c 	sdio->bio = bio;
bio               463 fs/direct-io.c 	struct bio *bio = sdio->bio;
bio               466 fs/direct-io.c 	bio->bi_private = dio;
bio               473 fs/direct-io.c 		bio_set_pages_dirty(bio);
bio               475 fs/direct-io.c 	dio->bio_disk = bio->bi_disk;
bio               478 fs/direct-io.c 		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
bio               481 fs/direct-io.c 		dio->bio_cookie = submit_bio(bio);
bio               483 fs/direct-io.c 	sdio->bio = NULL;
bio               503 fs/direct-io.c static struct bio *dio_await_one(struct dio *dio)
bio               506 fs/direct-io.c 	struct bio *bio = NULL;
bio               528 fs/direct-io.c 		bio = dio->bio_list;
bio               529 fs/direct-io.c 		dio->bio_list = bio->bi_private;
bio               532 fs/direct-io.c 	return bio;
bio               538 fs/direct-io.c static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
bio               540 fs/direct-io.c 	blk_status_t err = bio->bi_status;
bio               544 fs/direct-io.c 		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
bio               551 fs/direct-io.c 		bio_check_pages_dirty(bio);	/* transfers ownership */
bio               553 fs/direct-io.c 		bio_release_pages(bio, should_dirty);
bio               554 fs/direct-io.c 		bio_put(bio);
bio               568 fs/direct-io.c 	struct bio *bio;
bio               570 fs/direct-io.c 		bio = dio_await_one(dio);
bio               571 fs/direct-io.c 		if (bio)
bio               572 fs/direct-io.c 			dio_bio_complete(dio, bio);
bio               573 fs/direct-io.c 	} while (bio);
bio               590 fs/direct-io.c 			struct bio *bio;
bio               594 fs/direct-io.c 			bio = dio->bio_list;
bio               595 fs/direct-io.c 			dio->bio_list = bio->bi_private;
bio               597 fs/direct-io.c 			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
bio               753 fs/direct-io.c 	ret = bio_add_page(sdio->bio, sdio->cur_page,
bio               786 fs/direct-io.c 	if (sdio->bio) {
bio               789 fs/direct-io.c 			sdio->bio->bi_iter.bi_size;
bio               810 fs/direct-io.c 	if (sdio->bio == NULL) {
bio               894 fs/direct-io.c 		if (sdio->bio)
bio              1352 fs/direct-io.c 	if (sdio.bio)
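
fs/direct-io.c reuses ->bi_private as a link field once a bio completes: for synchronous I/O, dio_bio_end_io() pushes the finished bio onto the singly linked dio->bio_list and dio_await_one() pops it under the same lock. A hedged sketch of the push side; the bio_lock and waiter fields are assumed from the surrounding struct dio:

        static void sync_dio_endio(struct bio *bio)
        {
                struct dio *dio = bio->bi_private;
                unsigned long flags;

                spin_lock_irqsave(&dio->bio_lock, flags);
                bio->bi_private = dio->bio_list;        /* link in front */
                dio->bio_list = bio;
                if (dio->waiter)
                        wake_up_process(dio->waiter);   /* wake the submitter */
                spin_unlock_irqrestore(&dio->bio_lock, flags);
        }
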
bio                12 fs/erofs/data.c static void erofs_readendio(struct bio *bio)
bio                15 fs/erofs/data.c 	blk_status_t err = bio->bi_status;
bio                18 fs/erofs/data.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio                32 fs/erofs/data.c 	bio_put(bio);
bio               127 fs/erofs/data.c static inline struct bio *erofs_read_raw_page(struct bio *bio,
bio               147 fs/erofs/data.c 	if (bio &&
bio               151 fs/erofs/data.c 		submit_bio(bio);
bio               152 fs/erofs/data.c 		bio = NULL;
bio               155 fs/erofs/data.c 	if (!bio) {
bio               221 fs/erofs/data.c 		bio = bio_alloc(GFP_NOIO, nblocks);
bio               223 fs/erofs/data.c 		bio->bi_end_io = erofs_readendio;
bio               224 fs/erofs/data.c 		bio_set_dev(bio, sb->s_bdev);
bio               225 fs/erofs/data.c 		bio->bi_iter.bi_sector = (sector_t)blknr <<
bio               227 fs/erofs/data.c 		bio->bi_opf = REQ_OP_READ;
bio               230 fs/erofs/data.c 	err = bio_add_page(bio, page, PAGE_SIZE, 0);
bio               238 fs/erofs/data.c 	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
bio               244 fs/erofs/data.c 	return bio;
bio               256 fs/erofs/data.c 	if (bio)
bio               258 fs/erofs/data.c 		submit_bio(bio);
bio               269 fs/erofs/data.c 	struct bio *bio;
bio               273 fs/erofs/data.c 	bio = erofs_read_raw_page(NULL, page->mapping,
bio               276 fs/erofs/data.c 	if (IS_ERR(bio))
bio               277 fs/erofs/data.c 		return PTR_ERR(bio);
bio               279 fs/erofs/data.c 	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
bio               289 fs/erofs/data.c 	struct bio *bio = NULL;
bio               302 fs/erofs/data.c 			bio = erofs_read_raw_page(bio, mapping, page,
bio               306 fs/erofs/data.c 			if (IS_ERR(bio)) {
bio               311 fs/erofs/data.c 				bio = NULL;
bio               321 fs/erofs/data.c 	if (bio)
bio               322 fs/erofs/data.c 		submit_bio(bio);
bio               721 fs/erofs/zdata.c static inline void z_erofs_vle_read_endio(struct bio *bio)
bio               724 fs/erofs/zdata.c 	blk_status_t err = bio->bi_status;
bio               728 fs/erofs/zdata.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               751 fs/erofs/zdata.c 	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
bio               752 fs/erofs/zdata.c 	bio_put(bio);
bio              1211 fs/erofs/zdata.c 	struct bio *bio;
bio              1222 fs/erofs/zdata.c 	bio = NULL;
bio              1262 fs/erofs/zdata.c 		if (bio && force_submit) {
bio              1264 fs/erofs/zdata.c 			submit_bio(bio);
bio              1265 fs/erofs/zdata.c 			bio = NULL;
bio              1268 fs/erofs/zdata.c 		if (!bio) {
bio              1269 fs/erofs/zdata.c 			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio              1271 fs/erofs/zdata.c 			bio->bi_end_io = z_erofs_vle_read_endio;
bio              1272 fs/erofs/zdata.c 			bio_set_dev(bio, sb->s_bdev);
bio              1273 fs/erofs/zdata.c 			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
bio              1275 fs/erofs/zdata.c 			bio->bi_private = bi_private;
bio              1276 fs/erofs/zdata.c 			bio->bi_opf = REQ_OP_READ;
bio              1281 fs/erofs/zdata.c 		err = bio_add_page(bio, page, PAGE_SIZE, 0);
bio              1297 fs/erofs/zdata.c 	if (bio)
bio              1298 fs/erofs/zdata.c 		submit_bio(bio);
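
erofs_read_raw_page() above (like the ext4 and f2fs read paths that follow) accumulates contiguous pages into one open bio, flushing it whenever the next block is discontiguous or the bio is full. A sketch of that accumulation as a helper; readpage_add is an illustrative name and end_io is caller-supplied:

        static struct bio *readpage_add(struct bio *bio, struct super_block *sb,
                                        struct page *page, sector_t sector,
                                        bio_end_io_t *end_io)
        {
                /* submit an open bio the next page cannot extend contiguously */
                if (bio && sector != bio_end_sector(bio)) {
                        submit_bio(bio);
                        bio = NULL;
                }
        retry:
                if (!bio) {
                        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
                        bio_set_dev(bio, sb->s_bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio->bi_opf = REQ_OP_READ;
                        bio->bi_end_io = end_io;
                }
                if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                        submit_bio(bio);        /* bio is full; flush and retry */
                        bio = NULL;
                        goto retry;
                }
                return bio;
        }
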
bio               210 fs/ext4/ext4.h 	struct bio		*bio;		/* Linked list of completed
bio               220 fs/ext4/ext4.h 	struct bio		*io_bio;
bio              2780 fs/ext4/mballoc.c 		struct bio **biop)
bio              2856 fs/ext4/mballoc.c 	struct bio *discard_bio = NULL;
bio                62 fs/ext4/page-io.c static void ext4_finish_bio(struct bio *bio)
bio                67 fs/ext4/page-io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio                84 fs/ext4/page-io.c 		if (bio->bi_status) {
bio               103 fs/ext4/page-io.c 			if (bio->bi_status)
bio               117 fs/ext4/page-io.c 	struct bio *bio, *next_bio;
bio               123 fs/ext4/page-io.c 	for (bio = io_end->bio; bio; bio = next_bio) {
bio               124 fs/ext4/page-io.c 		next_bio = bio->bi_private;
bio               125 fs/ext4/page-io.c 		ext4_finish_bio(bio);
bio               126 fs/ext4/page-io.c 		bio_put(bio);
bio               289 fs/ext4/page-io.c static void ext4_end_bio(struct bio *bio)
bio               291 fs/ext4/page-io.c 	ext4_io_end_t *io_end = bio->bi_private;
bio               292 fs/ext4/page-io.c 	sector_t bi_sector = bio->bi_iter.bi_sector;
bio               296 fs/ext4/page-io.c 		      bio_devname(bio, b),
bio               297 fs/ext4/page-io.c 		      (long long) bio->bi_iter.bi_sector,
bio               298 fs/ext4/page-io.c 		      (unsigned) bio_sectors(bio),
bio               299 fs/ext4/page-io.c 		      bio->bi_status)) {
bio               300 fs/ext4/page-io.c 		ext4_finish_bio(bio);
bio               301 fs/ext4/page-io.c 		bio_put(bio);
bio               304 fs/ext4/page-io.c 	bio->bi_end_io = NULL;
bio               306 fs/ext4/page-io.c 	if (bio->bi_status) {
bio               311 fs/ext4/page-io.c 			     bio->bi_status, inode->i_ino,
bio               317 fs/ext4/page-io.c 				blk_status_to_errno(bio->bi_status));
bio               326 fs/ext4/page-io.c 		bio->bi_private = xchg(&io_end->bio, bio);
bio               334 fs/ext4/page-io.c 		ext4_finish_bio(bio);
bio               335 fs/ext4/page-io.c 		bio_put(bio);
bio               341 fs/ext4/page-io.c 	struct bio *bio = io->io_bio;
bio               343 fs/ext4/page-io.c 	if (bio) {
bio               364 fs/ext4/page-io.c 	struct bio *bio;
bio               366 fs/ext4/page-io.c 	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio               367 fs/ext4/page-io.c 	if (!bio)
bio               369 fs/ext4/page-io.c 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio               370 fs/ext4/page-io.c 	bio_set_dev(bio, bh->b_bdev);
bio               371 fs/ext4/page-io.c 	bio->bi_end_io = ext4_end_bio;
bio               372 fs/ext4/page-io.c 	bio->bi_private = ext4_get_io_end(io->io_end);
bio               373 fs/ext4/page-io.c 	io->io_bio = bio;
bio               375 fs/ext4/page-io.c 	wbc_init_bio(io->io_wbc, bio);
bio                64 fs/ext4/readpage.c 	struct bio *bio;
bio                70 fs/ext4/readpage.c static void __read_end_io(struct bio *bio)
bio                76 fs/ext4/readpage.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bio                80 fs/ext4/readpage.c 		if (bio->bi_status || PageError(page)) {
bio                89 fs/ext4/readpage.c 	if (bio->bi_private)
bio                90 fs/ext4/readpage.c 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio                91 fs/ext4/readpage.c 	bio_put(bio);
bio               101 fs/ext4/readpage.c 	fscrypt_decrypt_bio(ctx->bio);
bio               110 fs/ext4/readpage.c 	struct bio *bio = ctx->bio;
bio               121 fs/ext4/readpage.c 	bio->bi_private = NULL;
bio               123 fs/ext4/readpage.c 	fsverity_verify_bio(bio);
bio               125 fs/ext4/readpage.c 	__read_end_io(bio);
bio               153 fs/ext4/readpage.c 		__read_end_io(ctx->bio);
bio               157 fs/ext4/readpage.c static bool bio_post_read_required(struct bio *bio)
bio               159 fs/ext4/readpage.c 	return bio->bi_private && !bio->bi_status;
bio               174 fs/ext4/readpage.c static void mpage_end_io(struct bio *bio)
bio               176 fs/ext4/readpage.c 	if (bio_post_read_required(bio)) {
bio               177 fs/ext4/readpage.c 		struct bio_post_read_ctx *ctx = bio->bi_private;
bio               183 fs/ext4/readpage.c 	__read_end_io(bio);
bio               193 fs/ext4/readpage.c 						       struct bio *bio,
bio               209 fs/ext4/readpage.c 		ctx->bio = bio;
bio               211 fs/ext4/readpage.c 		bio->bi_private = ctx;
bio               229 fs/ext4/readpage.c 	struct bio *bio = NULL;
bio               368 fs/ext4/readpage.c 		if (bio && (last_block_in_bio != blocks[0] - 1)) {
bio               370 fs/ext4/readpage.c 			submit_bio(bio);
bio               371 fs/ext4/readpage.c 			bio = NULL;
bio               373 fs/ext4/readpage.c 		if (bio == NULL) {
bio               376 fs/ext4/readpage.c 			bio = bio_alloc(GFP_KERNEL,
bio               378 fs/ext4/readpage.c 			if (!bio)
bio               380 fs/ext4/readpage.c 			ctx = get_bio_post_read_ctx(inode, bio, page->index);
bio               382 fs/ext4/readpage.c 				bio_put(bio);
bio               383 fs/ext4/readpage.c 				bio = NULL;
bio               386 fs/ext4/readpage.c 			bio_set_dev(bio, bdev);
bio               387 fs/ext4/readpage.c 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio               388 fs/ext4/readpage.c 			bio->bi_end_io = mpage_end_io;
bio               389 fs/ext4/readpage.c 			bio->bi_private = ctx;
bio               390 fs/ext4/readpage.c 			bio_set_op_attrs(bio, REQ_OP_READ,
bio               395 fs/ext4/readpage.c 		if (bio_add_page(bio, page, length, 0) < length)
bio               401 fs/ext4/readpage.c 			submit_bio(bio);
bio               402 fs/ext4/readpage.c 			bio = NULL;
bio               407 fs/ext4/readpage.c 		if (bio) {
bio               408 fs/ext4/readpage.c 			submit_bio(bio);
bio               409 fs/ext4/readpage.c 			bio = NULL;
bio               420 fs/ext4/readpage.c 	if (bio)
bio               421 fs/ext4/readpage.c 		submit_bio(bio);
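
ext4's read path parks a mempool-allocated bio_post_read_ctx in ->bi_private when decryption or verification must run after the read, and mpage_end_io() branches on it. A loose sketch, assuming a post_read_wq workqueue and a ctx->work armed at allocation time (both assumptions; the real code dispatches through bio_post_read_processing()):

        static void post_read_endio(struct bio *bio)
        {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                if (ctx && !bio->bi_status) {
                        queue_work(post_read_wq, &ctx->work);   /* decrypt/verify */
                        return;         /* pages stay locked until the work runs */
                }
                __read_end_io(bio);     /* unlock pages; frees ctx to its mempool */
        }
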
bio                81 fs/f2fs/data.c 	struct bio *bio;
bio                87 fs/f2fs/data.c static void __read_end_io(struct bio *bio)
bio                93 fs/f2fs/data.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bio                97 fs/f2fs/data.c 		if (bio->bi_status || PageError(page)) {
bio               107 fs/f2fs/data.c 	if (bio->bi_private)
bio               108 fs/f2fs/data.c 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio               109 fs/f2fs/data.c 	bio_put(bio);
bio               119 fs/f2fs/data.c 	fscrypt_decrypt_bio(ctx->bio);
bio               129 fs/f2fs/data.c 	fsverity_verify_bio(ctx->bio);
bio               159 fs/f2fs/data.c 		__read_end_io(ctx->bio);
bio               163 fs/f2fs/data.c static bool f2fs_bio_post_read_required(struct bio *bio)
bio               165 fs/f2fs/data.c 	return bio->bi_private && !bio->bi_status;
bio               168 fs/f2fs/data.c static void f2fs_read_end_io(struct bio *bio)
bio               170 fs/f2fs/data.c 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
bio               173 fs/f2fs/data.c 		bio->bi_status = BLK_STS_IOERR;
bio               176 fs/f2fs/data.c 	if (f2fs_bio_post_read_required(bio)) {
bio               177 fs/f2fs/data.c 		struct bio_post_read_ctx *ctx = bio->bi_private;
bio               184 fs/f2fs/data.c 	__read_end_io(bio);
bio               187 fs/f2fs/data.c static void f2fs_write_end_io(struct bio *bio)
bio               189 fs/f2fs/data.c 	struct f2fs_sb_info *sbi = bio->bi_private;
bio               195 fs/f2fs/data.c 		bio->bi_status = BLK_STS_IOERR;
bio               198 fs/f2fs/data.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               208 fs/f2fs/data.c 			if (unlikely(bio->bi_status))
bio               215 fs/f2fs/data.c 		if (unlikely(bio->bi_status)) {
bio               234 fs/f2fs/data.c 	bio_put(bio);
bio               241 fs/f2fs/data.c 				block_t blk_addr, struct bio *bio)
bio               256 fs/f2fs/data.c 	if (bio) {
bio               257 fs/f2fs/data.c 		bio_set_dev(bio, bdev);
bio               258 fs/f2fs/data.c 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
bio               277 fs/f2fs/data.c 				block_t blk_addr, struct bio *bio)
bio               280 fs/f2fs/data.c 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
bio               286 fs/f2fs/data.c static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
bio               289 fs/f2fs/data.c 	struct bio *bio;
bio               291 fs/f2fs/data.c 	bio = f2fs_bio_alloc(sbi, npages, true);
bio               293 fs/f2fs/data.c 	f2fs_target_device(sbi, fio->new_blkaddr, bio);
bio               295 fs/f2fs/data.c 		bio->bi_end_io = f2fs_read_end_io;
bio               296 fs/f2fs/data.c 		bio->bi_private = NULL;
bio               298 fs/f2fs/data.c 		bio->bi_end_io = f2fs_write_end_io;
bio               299 fs/f2fs/data.c 		bio->bi_private = sbi;
bio               300 fs/f2fs/data.c 		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
bio               304 fs/f2fs/data.c 		wbc_init_bio(fio->io_wbc, bio);
bio               306 fs/f2fs/data.c 	return bio;
bio               310 fs/f2fs/data.c 				struct bio *bio, enum page_type type)
bio               312 fs/f2fs/data.c 	if (!is_read_io(bio_op(bio))) {
bio               324 fs/f2fs/data.c 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
bio               341 fs/f2fs/data.c 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
bio               352 fs/f2fs/data.c 	if (is_read_io(bio_op(bio)))
bio               353 fs/f2fs/data.c 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
bio               355 fs/f2fs/data.c 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
bio               356 fs/f2fs/data.c 	submit_bio(bio);
bio               363 fs/f2fs/data.c 	if (!io->bio)
bio               366 fs/f2fs/data.c 	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
bio               369 fs/f2fs/data.c 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
bio               371 fs/f2fs/data.c 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
bio               373 fs/f2fs/data.c 	__submit_bio(io->sbi, io->bio, fio->type);
bio               374 fs/f2fs/data.c 	io->bio = NULL;
bio               377 fs/f2fs/data.c static bool __has_merged_page(struct bio *bio, struct inode *inode,
bio               384 fs/f2fs/data.c 	if (!bio)
bio               390 fs/f2fs/data.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               440 fs/f2fs/data.c 			ret = __has_merged_page(io->bio, inode, page, ino);
bio               477 fs/f2fs/data.c 	struct bio *bio;
bio               490 fs/f2fs/data.c 	bio = __bio_alloc(fio, 1);
bio               492 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio               493 fs/f2fs/data.c 		bio_put(bio);
bio               500 fs/f2fs/data.c 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
bio               505 fs/f2fs/data.c 	__submit_bio(fio->sbi, bio, fio->type);
bio               509 fs/f2fs/data.c static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
bio               514 fs/f2fs/data.c 	return __same_bdev(sbi, cur_blkaddr, bio);
bio               525 fs/f2fs/data.c static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
bio               533 fs/f2fs/data.c 				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
bio               535 fs/f2fs/data.c 		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
bio               541 fs/f2fs/data.c 	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
bio               548 fs/f2fs/data.c 	struct bio *bio = *fio->bio;
bio               559 fs/f2fs/data.c 	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
bio               561 fs/f2fs/data.c 		__submit_bio(fio->sbi, bio, fio->type);
bio               562 fs/f2fs/data.c 		bio = NULL;
bio               565 fs/f2fs/data.c 	if (!bio) {
bio               566 fs/f2fs/data.c 		bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio               567 fs/f2fs/data.c 		bio_set_op_attrs(bio, fio->op, fio->op_flags);
bio               570 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio               571 fs/f2fs/data.c 		__submit_bio(fio->sbi, bio, fio->type);
bio               572 fs/f2fs/data.c 		bio = NULL;
bio               582 fs/f2fs/data.c 	*fio->bio = bio;
bio               587 fs/f2fs/data.c static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
bio               590 fs/f2fs/data.c 	if (!bio)
bio               593 fs/f2fs/data.c 	if (!__has_merged_page(*bio, NULL, page, 0))
bio               596 fs/f2fs/data.c 	__submit_bio(sbi, *bio, DATA);
bio               597 fs/f2fs/data.c 	*bio = NULL;
bio               632 fs/f2fs/data.c 	if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
bio               636 fs/f2fs/data.c 	if (io->bio == NULL) {
bio               644 fs/f2fs/data.c 		io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio               648 fs/f2fs/data.c 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio               676 fs/f2fs/data.c static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
bio               681 fs/f2fs/data.c 	struct bio *bio;
bio               685 fs/f2fs/data.c 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
bio               686 fs/f2fs/data.c 	if (!bio)
bio               688 fs/f2fs/data.c 	f2fs_target_device(sbi, blkaddr, bio);
bio               689 fs/f2fs/data.c 	bio->bi_end_io = f2fs_read_end_io;
bio               690 fs/f2fs/data.c 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
bio               701 fs/f2fs/data.c 			bio_put(bio);
bio               704 fs/f2fs/data.c 		ctx->bio = bio;
bio               706 fs/f2fs/data.c 		bio->bi_private = ctx;
bio               709 fs/f2fs/data.c 	return bio;
bio               717 fs/f2fs/data.c 	struct bio *bio;
bio               719 fs/f2fs/data.c 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index);
bio               720 fs/f2fs/data.c 	if (IS_ERR(bio))
bio               721 fs/f2fs/data.c 		return PTR_ERR(bio);
bio               726 fs/f2fs/data.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio               727 fs/f2fs/data.c 		bio_put(bio);
bio               732 fs/f2fs/data.c 	__submit_bio(sbi, bio, DATA);
bio              1641 fs/f2fs/data.c 					struct bio **bio_ret,
bio              1645 fs/f2fs/data.c 	struct bio *bio = *bio_ret;
bio              1716 fs/f2fs/data.c 	if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio,
bio              1719 fs/f2fs/data.c 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio              1720 fs/f2fs/data.c 		bio = NULL;
bio              1722 fs/f2fs/data.c 	if (bio == NULL) {
bio              1723 fs/f2fs/data.c 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
bio              1725 fs/f2fs/data.c 		if (IS_ERR(bio)) {
bio              1726 fs/f2fs/data.c 			ret = PTR_ERR(bio);
bio              1727 fs/f2fs/data.c 			bio = NULL;
bio              1738 fs/f2fs/data.c 	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
bio              1746 fs/f2fs/data.c 	if (bio) {
bio              1747 fs/f2fs/data.c 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio              1748 fs/f2fs/data.c 		bio = NULL;
bio              1752 fs/f2fs/data.c 	*bio_ret = bio;
bio              1769 fs/f2fs/data.c 	struct bio *bio = NULL;
bio              1796 fs/f2fs/data.c 		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
bio              1808 fs/f2fs/data.c 	if (bio)
bio              1809 fs/f2fs/data.c 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio              2078 fs/f2fs/data.c 				struct bio **bio,
bio              2105 fs/f2fs/data.c 		.bio = bio,
bio              2206 fs/f2fs/data.c 		f2fs_submit_ipu_bio(sbi, bio, page);
bio              2211 fs/f2fs/data.c 		f2fs_submit_ipu_bio(sbi, bio, page);
bio              2254 fs/f2fs/data.c 	struct bio *bio = NULL;
bio              2335 fs/f2fs/data.c 					f2fs_submit_ipu_bio(sbi, &bio, page);
bio              2344 fs/f2fs/data.c 			ret = __write_data_page(page, &submitted, &bio,
bio              2395 fs/f2fs/data.c 	if (bio)
bio              2396 fs/f2fs/data.c 		__submit_bio(sbi, bio, DATA);
bio              2767 fs/f2fs/data.c static void f2fs_dio_end_io(struct bio *bio)
bio              2769 fs/f2fs/data.c 	struct f2fs_private_dio *dio = bio->bi_private;
bio              2774 fs/f2fs/data.c 	bio->bi_private = dio->orig_private;
bio              2775 fs/f2fs/data.c 	bio->bi_end_io = dio->orig_end_io;
bio              2779 fs/f2fs/data.c 	bio_endio(bio);
bio              2782 fs/f2fs/data.c static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
bio              2786 fs/f2fs/data.c 	bool write = (bio_op(bio) == REQ_OP_WRITE);
bio              2794 fs/f2fs/data.c 	dio->orig_end_io = bio->bi_end_io;
bio              2795 fs/f2fs/data.c 	dio->orig_private = bio->bi_private;
bio              2798 fs/f2fs/data.c 	bio->bi_end_io = f2fs_dio_end_io;
bio              2799 fs/f2fs/data.c 	bio->bi_private = dio;
bio              2804 fs/f2fs/data.c 	submit_bio(bio);
bio              2807 fs/f2fs/data.c 	bio->bi_status = BLK_STS_IOERR;
bio              2808 fs/f2fs/data.c 	bio_endio(bio);
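
f2fs grows an open write bio only when the new block both directly follows the last block written and maps to the same device, per page_is_mergeable() and __same_bdev() above. A condensed sketch of that test; can_merge is an illustrative name and block_t is the f2fs block-address type:

        static bool can_merge(struct bio *bio, struct block_device *bdev,
                              block_t last_blk, block_t cur_blk)
        {
                if (!bio)
                        return false;
                if (cur_blk != last_blk + 1)            /* not physically contiguous */
                        return false;
                return bio->bi_disk == bdev->bd_disk && /* same disk and partition */
                       bio->bi_partno == bdev->bd_partno;
        }
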
bio              1067 fs/f2fs/f2fs.h 	struct bio **bio;		/* bio for ipu */
bio              1075 fs/f2fs/f2fs.h 	struct bio *bio;		/* bios to merge */
bio              2215 fs/f2fs/f2fs.h static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
bio              2218 fs/f2fs/f2fs.h 	struct bio *bio;
bio              2222 fs/f2fs/f2fs.h 		bio = bio_alloc(GFP_NOIO, npages);
bio              2223 fs/f2fs/f2fs.h 		if (!bio)
bio              2224 fs/f2fs/f2fs.h 			bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
bio              2225 fs/f2fs/f2fs.h 		return bio;
bio              3209 fs/f2fs/f2fs.h 			block_t blk_addr, struct bio *bio);
bio               561 fs/f2fs/segment.c 	struct bio *bio;
bio               564 fs/f2fs/segment.c 	bio = f2fs_bio_alloc(sbi, 0, false);
bio               565 fs/f2fs/segment.c 	if (!bio)
bio               568 fs/f2fs/segment.c 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
bio               569 fs/f2fs/segment.c 	bio_set_dev(bio, bdev);
bio               570 fs/f2fs/segment.c 	ret = submit_bio_wait(bio);
bio               571 fs/f2fs/segment.c 	bio_put(bio);
bio              1025 fs/f2fs/segment.c static void f2fs_submit_discard_endio(struct bio *bio)
bio              1027 fs/f2fs/segment.c 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
bio              1030 fs/f2fs/segment.c 	dc->error = blk_status_to_errno(bio->bi_status);
bio              1039 fs/f2fs/segment.c 	bio_put(bio);
bio              1145 fs/f2fs/segment.c 		struct bio *bio = NULL;
bio              1168 fs/f2fs/segment.c 					GFP_NOFS, 0, &bio);
bio              1179 fs/f2fs/segment.c 		f2fs_bug_on(sbi, !bio);
bio              1200 fs/f2fs/segment.c 		bio->bi_private = dc;
bio              1201 fs/f2fs/segment.c 		bio->bi_end_io = f2fs_submit_discard_endio;
bio              1202 fs/f2fs/segment.c 		bio->bi_opf |= flag;
bio              1203 fs/f2fs/segment.c 		submit_bio(bio);
bio              3267 fs/f2fs/segment.c 	if (fio->bio)
bio              3334 fs/f2fs/super.c 			sbi->write_io[i][j].bio = NULL;
bio               829 fs/gfs2/incore.h 	struct bio *sd_log_bio;
bio               203 fs/gfs2/lops.c static void gfs2_end_log_write(struct bio *bio)
bio               205 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = bio->bi_private;
bio               210 fs/gfs2/lops.c 	if (bio->bi_status) {
bio               212 fs/gfs2/lops.c 		       bio->bi_status, sdp->sd_jdesc->jd_jid);
bio               216 fs/gfs2/lops.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               219 fs/gfs2/lops.c 			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
bio               224 fs/gfs2/lops.c 	bio_put(bio);
bio               238 fs/gfs2/lops.c void gfs2_log_submit_bio(struct bio **biop, int opf)
bio               240 fs/gfs2/lops.c 	struct bio *bio = *biop;
bio               241 fs/gfs2/lops.c 	if (bio) {
bio               242 fs/gfs2/lops.c 		struct gfs2_sbd *sdp = bio->bi_private;
bio               244 fs/gfs2/lops.c 		bio->bi_opf = opf;
bio               245 fs/gfs2/lops.c 		submit_bio(bio);
bio               261 fs/gfs2/lops.c static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio               265 fs/gfs2/lops.c 	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio               267 fs/gfs2/lops.c 	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
bio               268 fs/gfs2/lops.c 	bio_set_dev(bio, sb->s_bdev);
bio               269 fs/gfs2/lops.c 	bio->bi_end_io = end_io;
bio               270 fs/gfs2/lops.c 	bio->bi_private = sdp;
bio               272 fs/gfs2/lops.c 	return bio;
bio               292 fs/gfs2/lops.c static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
bio               293 fs/gfs2/lops.c 				    struct bio **biop, int op,
bio               296 fs/gfs2/lops.c 	struct bio *bio = *biop;
bio               298 fs/gfs2/lops.c 	if (bio) {
bio               301 fs/gfs2/lops.c 		nblk = bio_end_sector(bio);
bio               304 fs/gfs2/lops.c 			return bio;
bio               328 fs/gfs2/lops.c 	struct bio *bio;
bio               331 fs/gfs2/lops.c 	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
bio               333 fs/gfs2/lops.c 	ret = bio_add_page(bio, page, size, offset);
bio               335 fs/gfs2/lops.c 		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
bio               337 fs/gfs2/lops.c 		ret = bio_add_page(bio, page, size, offset);
bio               384 fs/gfs2/lops.c static void gfs2_end_log_read(struct bio *bio)
bio               390 fs/gfs2/lops.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               392 fs/gfs2/lops.c 		if (bio->bi_status) {
bio               393 fs/gfs2/lops.c 			int err = blk_status_to_errno(bio->bi_status);
bio               401 fs/gfs2/lops.c 	bio_put(bio);
bio               474 fs/gfs2/lops.c static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
bio               476 fs/gfs2/lops.c 	struct bio *new;
bio               510 fs/gfs2/lops.c 	struct bio *bio = NULL;
bio               535 fs/gfs2/lops.c 			if (bio && (off || block < blocks_submitted + max_blocks)) {
bio               538 fs/gfs2/lops.c 				if (bio_end_sector(bio) == sector) {
bio               539 fs/gfs2/lops.c 					sz = bio_add_page(bio, page, bsize, off);
bio               547 fs/gfs2/lops.c 					bio = gfs2_chain_bio(bio, blocks);
bio               552 fs/gfs2/lops.c 			if (bio) {
bio               554 fs/gfs2/lops.c 				submit_bio(bio);
bio               557 fs/gfs2/lops.c 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio               558 fs/gfs2/lops.c 			bio->bi_opf = REQ_OP_READ;
bio               560 fs/gfs2/lops.c 			sz = bio_add_page(bio, page, bsize, off);
bio               579 fs/gfs2/lops.c 	if (bio)
bio               580 fs/gfs2/lops.c 		submit_bio(bio);
bio                25 fs/gfs2/lops.h extern void gfs2_log_submit_bio(struct bio **biop, int opf);
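
The gfs2 log code above is a clean instance of the block layer's add-or-flush idiom: keep appending pages to the current bio with bio_add_page(), and when a page no longer fits (or the target block is not contiguous, see gfs2_log_get_bio()), submit the bio and start a fresh one. A minimal sketch of that loop, assuming the two-argument bio_alloc() used throughout this listing; real callers would also set the device and completion fields:

        #include <linux/bio.h>

        static struct bio *log_write_page(struct bio *bio, struct page *page,
                                          unsigned int size, unsigned int offset,
                                          sector_t sector)
        {
                if (bio && bio_add_page(bio, page, size, offset) == size)
                        return bio;             /* fits in the current bio */

                if (bio)
                        submit_bio(bio);        /* full or discontiguous: flush */

                bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
                bio->bi_iter.bi_sector = sector;
                bio->bi_opf = REQ_OP_WRITE;
                /* real code also sets bi_end_io, bi_private and bio_set_dev() */
                if (bio_add_page(bio, page, size, offset) != size)
                        BUG();                  /* a fresh bio must take one page */
                return bio;
        }
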
bio               186 fs/gfs2/meta_io.c static void gfs2_meta_read_endio(struct bio *bio)
bio               191 fs/gfs2/meta_io.c 	bio_for_each_segment_all(bvec, bio, iter_all) {
bio               201 fs/gfs2/meta_io.c 			bh->b_end_io(bh, !bio->bi_status);
bio               205 fs/gfs2/meta_io.c 	bio_put(bio);
bio               217 fs/gfs2/meta_io.c 		struct bio *bio;
bio               219 fs/gfs2/meta_io.c 		bio = bio_alloc(GFP_NOIO, num);
bio               220 fs/gfs2/meta_io.c 		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio               221 fs/gfs2/meta_io.c 		bio_set_dev(bio, bh->b_bdev);
bio               224 fs/gfs2/meta_io.c 			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
bio               225 fs/gfs2/meta_io.c 				BUG_ON(bio->bi_iter.bi_size == 0);
bio               231 fs/gfs2/meta_io.c 		bio->bi_end_io = gfs2_meta_read_endio;
bio               232 fs/gfs2/meta_io.c 		bio_set_op_attrs(bio, op, op_flags);
bio               233 fs/gfs2/meta_io.c 		submit_bio(bio);
bio               183 fs/gfs2/ops_fstype.c static void end_bio_io_page(struct bio *bio)
bio               185 fs/gfs2/ops_fstype.c 	struct page *page = bio->bi_private;
bio               187 fs/gfs2/ops_fstype.c 	if (!bio->bi_status)
bio               190 fs/gfs2/ops_fstype.c 		pr_warn("error %d reading superblock\n", bio->bi_status);
bio               241 fs/gfs2/ops_fstype.c 	struct bio *bio;
bio               251 fs/gfs2/ops_fstype.c 	bio = bio_alloc(GFP_NOFS, 1);
bio               252 fs/gfs2/ops_fstype.c 	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
bio               253 fs/gfs2/ops_fstype.c 	bio_set_dev(bio, sb->s_bdev);
bio               254 fs/gfs2/ops_fstype.c 	bio_add_page(bio, page, PAGE_SIZE, 0);
bio               256 fs/gfs2/ops_fstype.c 	bio->bi_end_io = end_bio_io_page;
bio               257 fs/gfs2/ops_fstype.c 	bio->bi_private = page;
bio               258 fs/gfs2/ops_fstype.c 	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
bio               259 fs/gfs2/ops_fstype.c 	submit_bio(bio);
bio               261 fs/gfs2/ops_fstype.c 	bio_put(bio);
bio                51 fs/hfsplus/wrapper.c 	struct bio *bio;
bio                67 fs/hfsplus/wrapper.c 	bio = bio_alloc(GFP_NOIO, 1);
bio                68 fs/hfsplus/wrapper.c 	bio->bi_iter.bi_sector = sector;
bio                69 fs/hfsplus/wrapper.c 	bio_set_dev(bio, sb->s_bdev);
bio                70 fs/hfsplus/wrapper.c 	bio_set_op_attrs(bio, op, op_flags);
bio                80 fs/hfsplus/wrapper.c 		ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
bio                89 fs/hfsplus/wrapper.c 	ret = submit_bio_wait(bio);
bio                91 fs/hfsplus/wrapper.c 	bio_put(bio);
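
hfsplus above does synchronous sector I/O by pairing bio_alloc() with submit_bio_wait(), which sleeps until the bio completes and returns bi_status mapped to an errno. A sketch of the same shape for a single page, with error handling trimmed:

        static int read_one_page_sync(struct block_device *bdev, sector_t sector,
                                      struct page *page)
        {
                struct bio *bio = bio_alloc(GFP_NOIO, 1);
                int ret;

                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio_add_page(bio, page, PAGE_SIZE, 0);

                ret = submit_bio_wait(bio);     /* sets its own bi_end_io */
                bio_put(bio);
                return ret;                     /* 0 or negative errno */
        }
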
bio                41 fs/internal.h  extern void guard_bio_eod(struct bio *bio);
bio               167 fs/iomap/buffered-io.c iomap_read_end_io(struct bio *bio)
bio               169 fs/iomap/buffered-io.c 	int error = blk_status_to_errno(bio->bi_status);
bio               173 fs/iomap/buffered-io.c 	bio_for_each_segment_all(bvec, bio, iter_all)
bio               175 fs/iomap/buffered-io.c 	bio_put(bio);
bio               182 fs/iomap/buffered-io.c 	struct bio		*bio;
bio               241 fs/iomap/buffered-io.c 	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
bio               245 fs/iomap/buffered-io.c 	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
bio               259 fs/iomap/buffered-io.c 	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
bio               263 fs/iomap/buffered-io.c 		if (ctx->bio)
bio               264 fs/iomap/buffered-io.c 			submit_bio(ctx->bio);
bio               268 fs/iomap/buffered-io.c 		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
bio               269 fs/iomap/buffered-io.c 		ctx->bio->bi_opf = REQ_OP_READ;
bio               271 fs/iomap/buffered-io.c 			ctx->bio->bi_opf |= REQ_RAHEAD;
bio               272 fs/iomap/buffered-io.c 		ctx->bio->bi_iter.bi_sector = sector;
bio               273 fs/iomap/buffered-io.c 		bio_set_dev(ctx->bio, iomap->bdev);
bio               274 fs/iomap/buffered-io.c 		ctx->bio->bi_end_io = iomap_read_end_io;
bio               277 fs/iomap/buffered-io.c 	bio_add_page(ctx->bio, page, plen, poff);
bio               307 fs/iomap/buffered-io.c 	if (ctx.bio) {
bio               308 fs/iomap/buffered-io.c 		submit_bio(ctx.bio);
bio               404 fs/iomap/buffered-io.c 	if (ctx.bio)
bio               405 fs/iomap/buffered-io.c 		submit_bio(ctx.bio);
bio               533 fs/iomap/buffered-io.c 	struct bio bio;
bio               541 fs/iomap/buffered-io.c 	bio_init(&bio, &bvec, 1);
bio               542 fs/iomap/buffered-io.c 	bio.bi_opf = REQ_OP_READ;
bio               543 fs/iomap/buffered-io.c 	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
bio               544 fs/iomap/buffered-io.c 	bio_set_dev(&bio, iomap->bdev);
bio               545 fs/iomap/buffered-io.c 	__bio_add_page(&bio, page, plen, poff);
bio               546 fs/iomap/buffered-io.c 	return submit_bio_wait(&bio);
bio                62 fs/iomap/direct-io.c 		struct bio *bio)
bio                67 fs/iomap/direct-io.c 		bio_set_polled(bio, dio->iocb);
bio                70 fs/iomap/direct-io.c 	dio->submit.cookie = submit_bio(bio);
bio               146 fs/iomap/direct-io.c static void iomap_dio_bio_end_io(struct bio *bio)
bio               148 fs/iomap/direct-io.c 	struct iomap_dio *dio = bio->bi_private;
bio               151 fs/iomap/direct-io.c 	if (bio->bi_status)
bio               152 fs/iomap/direct-io.c 		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
bio               170 fs/iomap/direct-io.c 		bio_check_pages_dirty(bio);
bio               172 fs/iomap/direct-io.c 		bio_release_pages(bio, false);
bio               173 fs/iomap/direct-io.c 		bio_put(bio);
bio               183 fs/iomap/direct-io.c 	struct bio *bio;
bio               185 fs/iomap/direct-io.c 	bio = bio_alloc(GFP_KERNEL, 1);
bio               186 fs/iomap/direct-io.c 	bio_set_dev(bio, iomap->bdev);
bio               187 fs/iomap/direct-io.c 	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio               188 fs/iomap/direct-io.c 	bio->bi_private = dio;
bio               189 fs/iomap/direct-io.c 	bio->bi_end_io = iomap_dio_bio_end_io;
bio               192 fs/iomap/direct-io.c 	__bio_add_page(bio, page, len, 0);
bio               193 fs/iomap/direct-io.c 	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
bio               194 fs/iomap/direct-io.c 	iomap_dio_submit_bio(dio, iomap, bio);
bio               205 fs/iomap/direct-io.c 	struct bio *bio;
bio               263 fs/iomap/direct-io.c 		bio = bio_alloc(GFP_KERNEL, nr_pages);
bio               264 fs/iomap/direct-io.c 		bio_set_dev(bio, iomap->bdev);
bio               265 fs/iomap/direct-io.c 		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio               266 fs/iomap/direct-io.c 		bio->bi_write_hint = dio->iocb->ki_hint;
bio               267 fs/iomap/direct-io.c 		bio->bi_ioprio = dio->iocb->ki_ioprio;
bio               268 fs/iomap/direct-io.c 		bio->bi_private = dio;
bio               269 fs/iomap/direct-io.c 		bio->bi_end_io = iomap_dio_bio_end_io;
bio               271 fs/iomap/direct-io.c 		ret = bio_iov_iter_get_pages(bio, &iter);
bio               279 fs/iomap/direct-io.c 			bio_put(bio);
bio               283 fs/iomap/direct-io.c 		n = bio->bi_iter.bi_size;
bio               285 fs/iomap/direct-io.c 			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
bio               287 fs/iomap/direct-io.c 				bio->bi_opf |= REQ_FUA;
bio               292 fs/iomap/direct-io.c 			bio->bi_opf = REQ_OP_READ;
bio               294 fs/iomap/direct-io.c 				bio_set_pages_dirty(bio);
bio               304 fs/iomap/direct-io.c 		iomap_dio_submit_bio(dio, iomap, bio);
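
The direct-io path above drives the whole user buffer through bio_iov_iter_get_pages(), which pins as many iovec pages as fit in the bio and advances the iterator. A condensed sketch of that submission loop under this listing's bio_alloc() signature; the end_io/private parameters stand in for iomap_dio_bio_end_io() and the dio object:

        #include <linux/bio.h>
        #include <linux/uio.h>

        static ssize_t dio_submit_iter(struct block_device *bdev, sector_t sector,
                                       struct iov_iter *iter, bio_end_io_t *end_io,
                                       void *private)
        {
                ssize_t submitted = 0;

                while (iov_iter_count(iter)) {
                        struct bio *bio = bio_alloc(GFP_KERNEL,
                                        iov_iter_npages(iter, BIO_MAX_PAGES));
                        unsigned int n;
                        int ret;

                        bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio->bi_private = private;
                        bio->bi_end_io = end_io;
                        bio->bi_opf = iov_iter_rw(iter) == WRITE ?
                                        REQ_OP_WRITE | REQ_SYNC : REQ_OP_READ;

                        ret = bio_iov_iter_get_pages(bio, iter); /* pins pages */
                        if (ret) {
                                bio_put(bio);
                                return submitted ? submitted : ret;
                        }

                        n = bio->bi_iter.bi_size;       /* read before submit */
                        sector += n >> 9;
                        submitted += n;
                        submit_bio(bio);
                }
                return submitted;
        }
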
bio              1971 fs/jfs/jfs_logmgr.c 	struct bio *bio;
bio              1982 fs/jfs/jfs_logmgr.c 	bio = bio_alloc(GFP_NOFS, 1);
bio              1984 fs/jfs/jfs_logmgr.c 	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio              1985 fs/jfs/jfs_logmgr.c 	bio_set_dev(bio, log->bdev);
bio              1987 fs/jfs/jfs_logmgr.c 	bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
bio              1988 fs/jfs/jfs_logmgr.c 	BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio              1990 fs/jfs/jfs_logmgr.c 	bio->bi_end_io = lbmIODone;
bio              1991 fs/jfs/jfs_logmgr.c 	bio->bi_private = bp;
bio              1992 fs/jfs/jfs_logmgr.c 	bio->bi_opf = REQ_OP_READ;
bio              1995 fs/jfs/jfs_logmgr.c 		bio->bi_iter.bi_size = 0;
bio              1996 fs/jfs/jfs_logmgr.c 		lbmIODone(bio);
bio              1998 fs/jfs/jfs_logmgr.c 		submit_bio(bio);
bio              2122 fs/jfs/jfs_logmgr.c 	struct bio *bio;
bio              2127 fs/jfs/jfs_logmgr.c 	bio = bio_alloc(GFP_NOFS, 1);
bio              2128 fs/jfs/jfs_logmgr.c 	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio              2129 fs/jfs/jfs_logmgr.c 	bio_set_dev(bio, log->bdev);
bio              2131 fs/jfs/jfs_logmgr.c 	bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
bio              2132 fs/jfs/jfs_logmgr.c 	BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio              2134 fs/jfs/jfs_logmgr.c 	bio->bi_end_io = lbmIODone;
bio              2135 fs/jfs/jfs_logmgr.c 	bio->bi_private = bp;
bio              2136 fs/jfs/jfs_logmgr.c 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio              2140 fs/jfs/jfs_logmgr.c 		bio->bi_iter.bi_size = 0;
bio              2141 fs/jfs/jfs_logmgr.c 		lbmIODone(bio);
bio              2143 fs/jfs/jfs_logmgr.c 		submit_bio(bio);
bio              2179 fs/jfs/jfs_logmgr.c static void lbmIODone(struct bio *bio)
bio              2181 fs/jfs/jfs_logmgr.c 	struct lbuf *bp = bio->bi_private;
bio              2195 fs/jfs/jfs_logmgr.c 	if (bio->bi_status) {
bio              2201 fs/jfs/jfs_logmgr.c 	bio_put(bio);
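
lbmRead()/lbmStartIO() above show a less common completion pattern: when the log device must not actually be written (jfs checks its log state for this), the code zeroes bi_size and invokes the end_io handler by hand instead of submitting. Sketch of the shape; log_stopped is a hypothetical stand-in for that condition:

        static void submit_or_complete(struct bio *bio, bool log_stopped)
        {
                if (log_stopped) {
                        bio->bi_iter.bi_size = 0;  /* nothing transferred */
                        bio->bi_end_io(bio);       /* lbmIODone() in jfs */
                } else {
                        submit_bio(bio);
                }
        }
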
bio               266 fs/jfs/jfs_metapage.c static void metapage_read_end_io(struct bio *bio)
bio               268 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
bio               270 fs/jfs/jfs_metapage.c 	if (bio->bi_status) {
bio               276 fs/jfs/jfs_metapage.c 	bio_put(bio);
bio               321 fs/jfs/jfs_metapage.c static void metapage_write_end_io(struct bio *bio)
bio               323 fs/jfs/jfs_metapage.c 	struct page *page = bio->bi_private;
bio               327 fs/jfs/jfs_metapage.c 	if (bio->bi_status) {
bio               332 fs/jfs/jfs_metapage.c 	bio_put(bio);
bio               337 fs/jfs/jfs_metapage.c 	struct bio *bio = NULL;
bio               382 fs/jfs/jfs_metapage.c 		if (bio) {
bio               391 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
bio               399 fs/jfs/jfs_metapage.c 			if (!bio->bi_iter.bi_size)
bio               401 fs/jfs/jfs_metapage.c 			submit_bio(bio);
bio               403 fs/jfs/jfs_metapage.c 			bio = NULL;
bio               419 fs/jfs/jfs_metapage.c 		bio = bio_alloc(GFP_NOFS, 1);
bio               420 fs/jfs/jfs_metapage.c 		bio_set_dev(bio, inode->i_sb->s_bdev);
bio               421 fs/jfs/jfs_metapage.c 		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio               422 fs/jfs/jfs_metapage.c 		bio->bi_end_io = metapage_write_end_io;
bio               423 fs/jfs/jfs_metapage.c 		bio->bi_private = page;
bio               424 fs/jfs/jfs_metapage.c 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio               433 fs/jfs/jfs_metapage.c 	if (bio) {
bio               434 fs/jfs/jfs_metapage.c 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
bio               436 fs/jfs/jfs_metapage.c 		if (!bio->bi_iter.bi_size)
bio               439 fs/jfs/jfs_metapage.c 		submit_bio(bio);
bio               460 fs/jfs/jfs_metapage.c 		       4, bio, sizeof(*bio), 0);
bio               462 fs/jfs/jfs_metapage.c 	bio_put(bio);
bio               474 fs/jfs/jfs_metapage.c 	struct bio *bio = NULL;
bio               496 fs/jfs/jfs_metapage.c 			if (bio)
bio               497 fs/jfs/jfs_metapage.c 				submit_bio(bio);
bio               499 fs/jfs/jfs_metapage.c 			bio = bio_alloc(GFP_NOFS, 1);
bio               500 fs/jfs/jfs_metapage.c 			bio_set_dev(bio, inode->i_sb->s_bdev);
bio               501 fs/jfs/jfs_metapage.c 			bio->bi_iter.bi_sector =
bio               503 fs/jfs/jfs_metapage.c 			bio->bi_end_io = metapage_read_end_io;
bio               504 fs/jfs/jfs_metapage.c 			bio->bi_private = page;
bio               505 fs/jfs/jfs_metapage.c 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio               508 fs/jfs/jfs_metapage.c 			if (bio_add_page(bio, page, len, offset) < len)
bio               514 fs/jfs/jfs_metapage.c 	if (bio)
bio               515 fs/jfs/jfs_metapage.c 		submit_bio(bio);
bio               523 fs/jfs/jfs_metapage.c 	bio_put(bio);
bio                47 fs/mpage.c     static void mpage_end_io(struct bio *bio)
bio                52 fs/mpage.c     	bio_for_each_segment_all(bv, bio, iter_all) {
bio                54 fs/mpage.c     		page_endio(page, bio_op(bio),
bio                55 fs/mpage.c     			   blk_status_to_errno(bio->bi_status));
bio                58 fs/mpage.c     	bio_put(bio);
bio                61 fs/mpage.c     static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
bio                63 fs/mpage.c     	bio->bi_end_io = mpage_end_io;
bio                64 fs/mpage.c     	bio_set_op_attrs(bio, op, op_flags);
bio                65 fs/mpage.c     	guard_bio_eod(bio);
bio                66 fs/mpage.c     	submit_bio(bio);
bio                70 fs/mpage.c     static struct bio *
bio                75 fs/mpage.c     	struct bio *bio;
bio                79 fs/mpage.c     	bio = bio_alloc(gfp_flags, nr_vecs);
bio                81 fs/mpage.c     	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
bio                82 fs/mpage.c     		while (!bio && (nr_vecs /= 2))
bio                83 fs/mpage.c     			bio = bio_alloc(gfp_flags, nr_vecs);
bio                86 fs/mpage.c     	if (bio) {
bio                87 fs/mpage.c     		bio_set_dev(bio, bdev);
bio                88 fs/mpage.c     		bio->bi_iter.bi_sector = first_sector;
bio                90 fs/mpage.c     	return bio;
bio               137 fs/mpage.c     	struct bio *bio;
bio               156 fs/mpage.c     static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
bio               296 fs/mpage.c     	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
bio               297 fs/mpage.c     		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
bio               300 fs/mpage.c     	if (args->bio == NULL) {
bio               306 fs/mpage.c     		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
bio               310 fs/mpage.c     		if (args->bio == NULL)
bio               315 fs/mpage.c     	if (bio_add_page(args->bio, page, length, 0) < length) {
bio               316 fs/mpage.c     		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
bio               324 fs/mpage.c     		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
bio               328 fs/mpage.c     	return args->bio;
bio               331 fs/mpage.c     	if (args->bio)
bio               332 fs/mpage.c     		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
bio               404 fs/mpage.c     			args.bio = do_mpage_readpage(&args);
bio               409 fs/mpage.c     	if (args.bio)
bio               410 fs/mpage.c     		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
bio               426 fs/mpage.c     	args.bio = do_mpage_readpage(&args);
bio               427 fs/mpage.c     	if (args.bio)
bio               428 fs/mpage.c     		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
bio               451 fs/mpage.c     	struct bio *bio;
bio               500 fs/mpage.c     	struct bio *bio = mpd->bio;
bio               626 fs/mpage.c     	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
bio               627 fs/mpage.c     		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
bio               630 fs/mpage.c     	if (bio == NULL) {
bio               636 fs/mpage.c     		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
bio               638 fs/mpage.c     		if (bio == NULL)
bio               641 fs/mpage.c     		wbc_init_bio(wbc, bio);
bio               642 fs/mpage.c     		bio->bi_write_hint = inode->i_write_hint;
bio               652 fs/mpage.c     	if (bio_add_page(bio, page, length, 0) < length) {
bio               653 fs/mpage.c     		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
bio               663 fs/mpage.c     		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
bio               674 fs/mpage.c     	if (bio)
bio               675 fs/mpage.c     		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
bio               688 fs/mpage.c     	mpd->bio = bio;
bio               724 fs/mpage.c     			.bio = NULL,
bio               731 fs/mpage.c     		if (mpd.bio) {
bio               734 fs/mpage.c     			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
bio               746 fs/mpage.c     		.bio = NULL,
bio               752 fs/mpage.c     	if (mpd.bio) {
bio               755 fs/mpage.c     		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
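
mpage_alloc() above (like nilfs_alloc_seg_bio() further down) retries a failed bio allocation with progressively fewer vecs when running in a memory-reclaim context, since a smaller bio may still make forward progress. Minimal sketch of the backoff:

        #include <linux/bio.h>
        #include <linux/sched.h>

        static struct bio *alloc_bio_backoff(struct block_device *bdev,
                                             sector_t first_sector, gfp_t gfp,
                                             unsigned int nr_vecs)
        {
                struct bio *bio = bio_alloc(gfp, nr_vecs);

                if (!bio && (current->flags & PF_MEMALLOC)) {
                        while (!bio && (nr_vecs /= 2))  /* halve and retry */
                                bio = bio_alloc(gfp, nr_vecs);
                }
                if (bio) {
                        bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = first_sector;
                }
                return bio;     /* may be NULL: caller handles -ENOMEM */
        }
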
bio               104 fs/nfs/blocklayout/blocklayout.c static struct bio *
bio               105 fs/nfs/blocklayout/blocklayout.c bl_submit_bio(struct bio *bio)
bio               107 fs/nfs/blocklayout/blocklayout.c 	if (bio) {
bio               108 fs/nfs/blocklayout/blocklayout.c 		get_parallel(bio->bi_private);
bio               110 fs/nfs/blocklayout/blocklayout.c 			bio_op(bio) == READ ? "read" : "write",
bio               111 fs/nfs/blocklayout/blocklayout.c 			bio->bi_iter.bi_size,
bio               112 fs/nfs/blocklayout/blocklayout.c 			(unsigned long long)bio->bi_iter.bi_sector);
bio               113 fs/nfs/blocklayout/blocklayout.c 		submit_bio(bio);
bio               118 fs/nfs/blocklayout/blocklayout.c static struct bio *
bio               122 fs/nfs/blocklayout/blocklayout.c 	struct bio *bio;
bio               125 fs/nfs/blocklayout/blocklayout.c 	bio = bio_alloc(GFP_NOIO, npg);
bio               126 fs/nfs/blocklayout/blocklayout.c 	if (!bio && (current->flags & PF_MEMALLOC)) {
bio               127 fs/nfs/blocklayout/blocklayout.c 		while (!bio && (npg /= 2))
bio               128 fs/nfs/blocklayout/blocklayout.c 			bio = bio_alloc(GFP_NOIO, npg);
bio               131 fs/nfs/blocklayout/blocklayout.c 	if (bio) {
bio               132 fs/nfs/blocklayout/blocklayout.c 		bio->bi_iter.bi_sector = disk_sector;
bio               133 fs/nfs/blocklayout/blocklayout.c 		bio_set_dev(bio, bdev);
bio               134 fs/nfs/blocklayout/blocklayout.c 		bio->bi_end_io = end_io;
bio               135 fs/nfs/blocklayout/blocklayout.c 		bio->bi_private = par;
bio               137 fs/nfs/blocklayout/blocklayout.c 	return bio;
bio               145 fs/nfs/blocklayout/blocklayout.c static struct bio *
bio               146 fs/nfs/blocklayout/blocklayout.c do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
bio               167 fs/nfs/blocklayout/blocklayout.c 		bio = bl_submit_bio(bio);
bio               178 fs/nfs/blocklayout/blocklayout.c 	if (!bio) {
bio               179 fs/nfs/blocklayout/blocklayout.c 		bio = bl_alloc_init_bio(npg, map->bdev,
bio               181 fs/nfs/blocklayout/blocklayout.c 		if (!bio)
bio               183 fs/nfs/blocklayout/blocklayout.c 		bio_set_op_attrs(bio, rw, 0);
bio               185 fs/nfs/blocklayout/blocklayout.c 	if (bio_add_page(bio, page, *len, offset) < *len) {
bio               186 fs/nfs/blocklayout/blocklayout.c 		bio = bl_submit_bio(bio);
bio               189 fs/nfs/blocklayout/blocklayout.c 	return bio;
bio               215 fs/nfs/blocklayout/blocklayout.c static void bl_end_io_read(struct bio *bio)
bio               217 fs/nfs/blocklayout/blocklayout.c 	struct parallel_io *par = bio->bi_private;
bio               219 fs/nfs/blocklayout/blocklayout.c 	if (bio->bi_status) {
bio               228 fs/nfs/blocklayout/blocklayout.c 	bio_put(bio);
bio               257 fs/nfs/blocklayout/blocklayout.c 	struct bio *bio = NULL;
bio               286 fs/nfs/blocklayout/blocklayout.c 			bio = bl_submit_bio(bio);
bio               307 fs/nfs/blocklayout/blocklayout.c 			bio = bl_submit_bio(bio);
bio               315 fs/nfs/blocklayout/blocklayout.c 			bio = do_add_page_to_bio(bio,
bio               321 fs/nfs/blocklayout/blocklayout.c 			if (IS_ERR(bio)) {
bio               322 fs/nfs/blocklayout/blocklayout.c 				header->pnfs_error = PTR_ERR(bio);
bio               323 fs/nfs/blocklayout/blocklayout.c 				bio = NULL;
bio               340 fs/nfs/blocklayout/blocklayout.c 	bl_submit_bio(bio);
bio               346 fs/nfs/blocklayout/blocklayout.c static void bl_end_io_write(struct bio *bio)
bio               348 fs/nfs/blocklayout/blocklayout.c 	struct parallel_io *par = bio->bi_private;
bio               351 fs/nfs/blocklayout/blocklayout.c 	if (bio->bi_status) {
bio               357 fs/nfs/blocklayout/blocklayout.c 	bio_put(bio);
bio               402 fs/nfs/blocklayout/blocklayout.c 	struct bio *bio = NULL;
bio               434 fs/nfs/blocklayout/blocklayout.c 			bio = bl_submit_bio(bio);
bio               445 fs/nfs/blocklayout/blocklayout.c 		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
bio               449 fs/nfs/blocklayout/blocklayout.c 		if (IS_ERR(bio)) {
bio               450 fs/nfs/blocklayout/blocklayout.c 			header->pnfs_error = PTR_ERR(bio);
bio               451 fs/nfs/blocklayout/blocklayout.c 			bio = NULL;
bio               463 fs/nfs/blocklayout/blocklayout.c 	bl_submit_bio(bio);
bio                22 fs/nilfs2/segbuf.c 	struct bio	       *bio;
bio               328 fs/nilfs2/segbuf.c static void nilfs_end_bio_write(struct bio *bio)
bio               330 fs/nilfs2/segbuf.c 	struct nilfs_segment_buffer *segbuf = bio->bi_private;
bio               332 fs/nilfs2/segbuf.c 	if (bio->bi_status)
bio               335 fs/nilfs2/segbuf.c 	bio_put(bio);
bio               343 fs/nilfs2/segbuf.c 	struct bio *bio = wi->bio;
bio               351 fs/nilfs2/segbuf.c 			bio_put(bio);
bio               357 fs/nilfs2/segbuf.c 	bio->bi_end_io = nilfs_end_bio_write;
bio               358 fs/nilfs2/segbuf.c 	bio->bi_private = segbuf;
bio               359 fs/nilfs2/segbuf.c 	bio_set_op_attrs(bio, mode, mode_flags);
bio               360 fs/nilfs2/segbuf.c 	submit_bio(bio);
bio               363 fs/nilfs2/segbuf.c 	wi->bio = NULL;
bio               370 fs/nilfs2/segbuf.c 	wi->bio = NULL;
bio               383 fs/nilfs2/segbuf.c static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
bio               386 fs/nilfs2/segbuf.c 	struct bio *bio;
bio               388 fs/nilfs2/segbuf.c 	bio = bio_alloc(GFP_NOIO, nr_vecs);
bio               389 fs/nilfs2/segbuf.c 	if (bio == NULL) {
bio               390 fs/nilfs2/segbuf.c 		while (!bio && (nr_vecs >>= 1))
bio               391 fs/nilfs2/segbuf.c 			bio = bio_alloc(GFP_NOIO, nr_vecs);
bio               393 fs/nilfs2/segbuf.c 	if (likely(bio)) {
bio               394 fs/nilfs2/segbuf.c 		bio_set_dev(bio, nilfs->ns_bdev);
bio               395 fs/nilfs2/segbuf.c 		bio->bi_iter.bi_sector =
bio               398 fs/nilfs2/segbuf.c 	return bio;
bio               404 fs/nilfs2/segbuf.c 	wi->bio = NULL;
bio               420 fs/nilfs2/segbuf.c 	if (!wi->bio) {
bio               421 fs/nilfs2/segbuf.c 		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
bio               423 fs/nilfs2/segbuf.c 		if (unlikely(!wi->bio))
bio               427 fs/nilfs2/segbuf.c 	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
bio               474 fs/nilfs2/segbuf.c 	if (wi.bio) {
bio               492 fs/ocfs2/cluster/heartbeat.c static void o2hb_bio_end_io(struct bio *bio)
bio               494 fs/ocfs2/cluster/heartbeat.c 	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
bio               496 fs/ocfs2/cluster/heartbeat.c 	if (bio->bi_status) {
bio               497 fs/ocfs2/cluster/heartbeat.c 		mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
bio               498 fs/ocfs2/cluster/heartbeat.c 		wc->wc_error = blk_status_to_errno(bio->bi_status);
bio               502 fs/ocfs2/cluster/heartbeat.c 	bio_put(bio);
bio               507 fs/ocfs2/cluster/heartbeat.c static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
bio               518 fs/ocfs2/cluster/heartbeat.c 	struct bio *bio;
bio               525 fs/ocfs2/cluster/heartbeat.c 	bio = bio_alloc(GFP_ATOMIC, 16);
bio               526 fs/ocfs2/cluster/heartbeat.c 	if (!bio) {
bio               528 fs/ocfs2/cluster/heartbeat.c 		bio = ERR_PTR(-ENOMEM);
bio               533 fs/ocfs2/cluster/heartbeat.c 	bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
bio               534 fs/ocfs2/cluster/heartbeat.c 	bio_set_dev(bio, reg->hr_bdev);
bio               535 fs/ocfs2/cluster/heartbeat.c 	bio->bi_private = wc;
bio               536 fs/ocfs2/cluster/heartbeat.c 	bio->bi_end_io = o2hb_bio_end_io;
bio               537 fs/ocfs2/cluster/heartbeat.c 	bio_set_op_attrs(bio, op, op_flags);
bio               550 fs/ocfs2/cluster/heartbeat.c 		len = bio_add_page(bio, page, vec_len, vec_start);
bio               559 fs/ocfs2/cluster/heartbeat.c 	return bio;
bio               569 fs/ocfs2/cluster/heartbeat.c 	struct bio *bio;
bio               574 fs/ocfs2/cluster/heartbeat.c 		bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
bio               576 fs/ocfs2/cluster/heartbeat.c 		if (IS_ERR(bio)) {
bio               577 fs/ocfs2/cluster/heartbeat.c 			status = PTR_ERR(bio);
bio               583 fs/ocfs2/cluster/heartbeat.c 		submit_bio(bio);
bio               601 fs/ocfs2/cluster/heartbeat.c 	struct bio *bio;
bio               607 fs/ocfs2/cluster/heartbeat.c 	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
bio               609 fs/ocfs2/cluster/heartbeat.c 	if (IS_ERR(bio)) {
bio               610 fs/ocfs2/cluster/heartbeat.c 		status = PTR_ERR(bio);
bio               616 fs/ocfs2/cluster/heartbeat.c 	submit_bio(bio);
bio               221 fs/verity/verify.c void fsverity_verify_bio(struct bio *bio)
bio               223 fs/verity/verify.c 	struct inode *inode = bio_first_page_all(bio)->mapping->host;
bio               231 fs/verity/verify.c 		bio_for_each_segment_all(bv, bio, iter_all)
bio               236 fs/verity/verify.c 	bio_for_each_segment_all(bv, bio, iter_all) {
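
fsverity_verify_bio() above walks a completed bio with bio_for_each_segment_all(), which visits every single-page bvec and is only safe once the bio has stopped advancing (typically inside bi_end_io). A sketch of a conventional read-completion handler built on the same iterator; the function name is illustrative, not a kernel API:

        #include <linux/bio.h>
        #include <linux/pagemap.h>

        static void example_read_end_io(struct bio *bio)
        {
                struct bio_vec *bv;
                struct bvec_iter_all iter_all;

                bio_for_each_segment_all(bv, bio, iter_all) {
                        struct page *page = bv->bv_page;

                        if (bio->bi_status)
                                SetPageError(page);
                        else
                                SetPageUptodate(page);
                        unlock_page(page);
                }
                bio_put(bio);
        }
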
bio                89 fs/xfs/xfs_aops.c 	struct bio		*bio = &ioend->io_inline_bio;
bio                90 fs/xfs/xfs_aops.c 	struct bio		*last = ioend->io_bio, *next;
bio                91 fs/xfs/xfs_aops.c 	u64			start = bio->bi_iter.bi_sector;
bio                92 fs/xfs/xfs_aops.c 	bool			quiet = bio_flagged(bio, BIO_QUIET);
bio                94 fs/xfs/xfs_aops.c 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
bio               102 fs/xfs/xfs_aops.c 		if (bio == last)
bio               105 fs/xfs/xfs_aops.c 			next = bio->bi_private;
bio               108 fs/xfs/xfs_aops.c 		bio_for_each_segment_all(bvec, bio, iter_all)
bio               110 fs/xfs/xfs_aops.c 		bio_put(bio);
bio               398 fs/xfs/xfs_aops.c 	struct bio		*bio)
bio               400 fs/xfs/xfs_aops.c 	struct xfs_ioend	*ioend = bio->bi_private;
bio               415 fs/xfs/xfs_aops.c 		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
bio               704 fs/xfs/xfs_aops.c 	struct bio		*bio;
bio               706 fs/xfs/xfs_aops.c 	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
bio               707 fs/xfs/xfs_aops.c 	bio_set_dev(bio, bdev);
bio               708 fs/xfs/xfs_aops.c 	bio->bi_iter.bi_sector = sector;
bio               709 fs/xfs/xfs_aops.c 	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
bio               710 fs/xfs/xfs_aops.c 	bio->bi_write_hint = inode->i_write_hint;
bio               711 fs/xfs/xfs_aops.c 	wbc_init_bio(wbc, bio);
bio               713 fs/xfs/xfs_aops.c 	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
bio               721 fs/xfs/xfs_aops.c 	ioend->io_bio = bio;
bio               732 fs/xfs/xfs_aops.c static struct bio *
bio               734 fs/xfs/xfs_aops.c 	struct bio		*prev)
bio               736 fs/xfs/xfs_aops.c 	struct bio *new;
bio                22 fs/xfs/xfs_aops.h 	struct bio		*io_bio;	/* bio being built */
bio                23 fs/xfs/xfs_aops.h 	struct bio		io_inline_bio;	/* MUST BE LAST! */
bio                24 fs/xfs/xfs_bio_io.c 	struct bio		*bio;
bio                29 fs/xfs/xfs_bio_io.c 	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
bio                30 fs/xfs/xfs_bio_io.c 	bio_set_dev(bio, bdev);
bio                31 fs/xfs/xfs_bio_io.c 	bio->bi_iter.bi_sector = sector;
bio                32 fs/xfs/xfs_bio_io.c 	bio->bi_opf = op | REQ_META | REQ_SYNC;
bio                39 fs/xfs/xfs_bio_io.c 		while (bio_add_page(bio, page, len, off) != len) {
bio                40 fs/xfs/xfs_bio_io.c 			struct bio	*prev = bio;
bio                42 fs/xfs/xfs_bio_io.c 			bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
bio                43 fs/xfs/xfs_bio_io.c 			bio_copy_dev(bio, prev);
bio                44 fs/xfs/xfs_bio_io.c 			bio->bi_iter.bi_sector = bio_end_sector(prev);
bio                45 fs/xfs/xfs_bio_io.c 			bio->bi_opf = prev->bi_opf;
bio                46 fs/xfs/xfs_bio_io.c 			bio_chain(prev, bio);
bio                55 fs/xfs/xfs_bio_io.c 	error = submit_bio_wait(bio);
bio                56 fs/xfs/xfs_bio_io.c 	bio_put(bio);
bio              1236 fs/xfs/xfs_buf.c 	struct bio		*bio)
bio              1238 fs/xfs/xfs_buf.c 	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
bio              1244 fs/xfs/xfs_buf.c 	if (bio->bi_status) {
bio              1245 fs/xfs/xfs_buf.c 		int error = blk_status_to_errno(bio->bi_status);
bio              1255 fs/xfs/xfs_buf.c 	bio_put(bio);
bio              1270 fs/xfs/xfs_buf.c 	struct bio	*bio;
bio              1295 fs/xfs/xfs_buf.c 	bio = bio_alloc(GFP_NOIO, nr_pages);
bio              1296 fs/xfs/xfs_buf.c 	bio_set_dev(bio, bp->b_target->bt_bdev);
bio              1297 fs/xfs/xfs_buf.c 	bio->bi_iter.bi_sector = sector;
bio              1298 fs/xfs/xfs_buf.c 	bio->bi_end_io = xfs_buf_bio_end_io;
bio              1299 fs/xfs/xfs_buf.c 	bio->bi_private = bp;
bio              1300 fs/xfs/xfs_buf.c 	bio_set_op_attrs(bio, op, op_flags);
bio              1308 fs/xfs/xfs_buf.c 		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
bio              1319 fs/xfs/xfs_buf.c 	if (likely(bio->bi_iter.bi_size)) {
bio              1324 fs/xfs/xfs_buf.c 		submit_bio(bio);
bio              1334 fs/xfs/xfs_buf.c 		bio_put(bio);
bio              1687 fs/xfs/xfs_log.c 	struct bio		*bio)
bio              1689 fs/xfs/xfs_log.c 	struct xlog_in_core	*iclog = bio->bi_private;
bio              1697 fs/xfs/xfs_log.c 	struct bio		*bio,
bio              1706 fs/xfs/xfs_log.c 		WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);
bio              1765 fs/xfs/xfs_log.c 		struct bio *split;
bio               519 fs/xfs/xfs_log_cil.c 	struct bio		*bio)
bio               521 fs/xfs/xfs_log_cil.c 	struct xfs_cil_ctx	*ctx = bio->bi_private;
bio               525 fs/xfs/xfs_log_cil.c 	bio_put(bio);
bio               535 fs/xfs/xfs_log_cil.c 	struct bio		*bio = NULL;
bio               549 fs/xfs/xfs_log_cil.c 				GFP_NOFS, 0, &bio);
bio               560 fs/xfs/xfs_log_cil.c 	if (bio) {
bio               561 fs/xfs/xfs_log_cil.c 		bio->bi_private = ctx;
bio               562 fs/xfs/xfs_log_cil.c 		bio->bi_end_io = xlog_discard_endio;
bio               563 fs/xfs/xfs_log_cil.c 		submit_bio(bio);
bio               226 fs/xfs/xfs_log_priv.h 	struct bio		ic_bio;
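
fs/xfs/xfs_bio_io.c above shows bio chaining: when bio_add_page() stops accepting pages, a new bio inheriting the device, position and op flags is chained to the old one, so a single submit_bio_wait() on the last bio waits for the entire chain. A compact sketch under the same API assumptions:

        static int rw_pages_chained(struct block_device *bdev, sector_t sector,
                                    struct page **pages, unsigned int npages,
                                    unsigned int op)
        {
                struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
                unsigned int i;
                int error;

                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = sector;
                bio->bi_opf = op | REQ_META | REQ_SYNC;

                for (i = 0; i < npages; i++) {
                        while (bio_add_page(bio, pages[i], PAGE_SIZE, 0) !=
                               PAGE_SIZE) {
                                struct bio *prev = bio;

                                bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
                                bio_copy_dev(bio, prev);
                                bio->bi_iter.bi_sector = bio_end_sector(prev);
                                bio->bi_opf = prev->bi_opf;
                                bio_chain(prev, bio);   /* prev completes into bio */
                                submit_bio(prev);
                        }
                }

                error = submit_bio_wait(bio);   /* covers the whole chain */
                bio_put(bio);
                return error;
        }
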
bio                26 include/linux/bio.h #define bio_prio(bio)			(bio)->bi_ioprio
bio                27 include/linux/bio.h #define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
bio                29 include/linux/bio.h #define bio_iter_iovec(bio, iter)				\
bio                30 include/linux/bio.h 	bvec_iter_bvec((bio)->bi_io_vec, (iter))
bio                32 include/linux/bio.h #define bio_iter_page(bio, iter)				\
bio                33 include/linux/bio.h 	bvec_iter_page((bio)->bi_io_vec, (iter))
bio                34 include/linux/bio.h #define bio_iter_len(bio, iter)					\
bio                35 include/linux/bio.h 	bvec_iter_len((bio)->bi_io_vec, (iter))
bio                36 include/linux/bio.h #define bio_iter_offset(bio, iter)				\
bio                37 include/linux/bio.h 	bvec_iter_offset((bio)->bi_io_vec, (iter))
bio                39 include/linux/bio.h #define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
bio                40 include/linux/bio.h #define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
bio                41 include/linux/bio.h #define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
bio                43 include/linux/bio.h #define bio_multiple_segments(bio)				\
bio                44 include/linux/bio.h 	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
bio                49 include/linux/bio.h #define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
bio                50 include/linux/bio.h #define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
bio                55 include/linux/bio.h #define bio_data_dir(bio) \
bio                56 include/linux/bio.h 	(op_is_write(bio_op(bio)) ? WRITE : READ)
bio                61 include/linux/bio.h static inline bool bio_has_data(struct bio *bio)
bio                63 include/linux/bio.h 	if (bio &&
bio                64 include/linux/bio.h 	    bio->bi_iter.bi_size &&
bio                65 include/linux/bio.h 	    bio_op(bio) != REQ_OP_DISCARD &&
bio                66 include/linux/bio.h 	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
bio                67 include/linux/bio.h 	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
bio                73 include/linux/bio.h static inline bool bio_no_advance_iter(struct bio *bio)
bio                75 include/linux/bio.h 	return bio_op(bio) == REQ_OP_DISCARD ||
bio                76 include/linux/bio.h 	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
bio                77 include/linux/bio.h 	       bio_op(bio) == REQ_OP_WRITE_SAME ||
bio                78 include/linux/bio.h 	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
bio                81 include/linux/bio.h static inline bool bio_mergeable(struct bio *bio)
bio                83 include/linux/bio.h 	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
bio                89 include/linux/bio.h static inline unsigned int bio_cur_bytes(struct bio *bio)
bio                91 include/linux/bio.h 	if (bio_has_data(bio))
bio                92 include/linux/bio.h 		return bio_iovec(bio).bv_len;
bio                94 include/linux/bio.h 		return bio->bi_iter.bi_size;
bio                97 include/linux/bio.h static inline void *bio_data(struct bio *bio)
bio                99 include/linux/bio.h 	if (bio_has_data(bio))
bio               100 include/linux/bio.h 		return page_address(bio_page(bio)) + bio_offset(bio);
bio               113 include/linux/bio.h static inline bool bio_full(struct bio *bio, unsigned len)
bio               115 include/linux/bio.h 	if (bio->bi_vcnt >= bio->bi_max_vecs)
bio               118 include/linux/bio.h 	if (bio->bi_iter.bi_size > UINT_MAX - len)
bio               124 include/linux/bio.h static inline bool bio_next_segment(const struct bio *bio,
bio               127 include/linux/bio.h 	if (iter->idx >= bio->bi_vcnt)
bio               130 include/linux/bio.h 	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
bio               138 include/linux/bio.h #define bio_for_each_segment_all(bvl, bio, iter) \
bio               139 include/linux/bio.h 	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
bio               141 include/linux/bio.h static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
bio               146 include/linux/bio.h 	if (bio_no_advance_iter(bio))
bio               149 include/linux/bio.h 		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
bio               153 include/linux/bio.h #define __bio_for_each_segment(bvl, bio, iter, start)			\
bio               156 include/linux/bio.h 		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
bio               157 include/linux/bio.h 	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
bio               159 include/linux/bio.h #define bio_for_each_segment(bvl, bio, iter)				\
bio               160 include/linux/bio.h 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
bio               162 include/linux/bio.h #define __bio_for_each_bvec(bvl, bio, iter, start)		\
bio               165 include/linux/bio.h 		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
bio               166 include/linux/bio.h 	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
bio               169 include/linux/bio.h #define bio_for_each_bvec(bvl, bio, iter)			\
bio               170 include/linux/bio.h 	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
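
Usage note for the iterator macros above: bio_for_each_segment() yields single-page bio_vecs through a private bvec_iter copy, so the bio's own bi_iter is untouched, while bio_for_each_bvec() yields multi-page bvecs for callers that can handle them. For a data bio the per-segment lengths sum to bi_iter.bi_size:

        static unsigned int bio_payload_bytes(struct bio *bio)
        {
                struct bio_vec bvec;    /* by value, per the macro above */
                struct bvec_iter iter;
                unsigned int bytes = 0;

                bio_for_each_segment(bvec, bio, iter)
                        bytes += bvec.bv_len;

                return bytes;   /* == bio->bi_iter.bi_size for data bios */
        }
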
bio               174 include/linux/bio.h static inline unsigned bio_segments(struct bio *bio)
bio               185 include/linux/bio.h 	switch (bio_op(bio)) {
bio               196 include/linux/bio.h 	bio_for_each_segment(bv, bio, iter)
bio               216 include/linux/bio.h static inline void bio_get(struct bio *bio)
bio               218 include/linux/bio.h 	bio->bi_flags |= (1 << BIO_REFFED);
bio               220 include/linux/bio.h 	atomic_inc(&bio->__bi_cnt);
bio               223 include/linux/bio.h static inline void bio_cnt_set(struct bio *bio, unsigned int count)
bio               226 include/linux/bio.h 		bio->bi_flags |= (1 << BIO_REFFED);
bio               229 include/linux/bio.h 	atomic_set(&bio->__bi_cnt, count);
bio               232 include/linux/bio.h static inline bool bio_flagged(struct bio *bio, unsigned int bit)
bio               234 include/linux/bio.h 	return (bio->bi_flags & (1U << bit)) != 0;
bio               237 include/linux/bio.h static inline void bio_set_flag(struct bio *bio, unsigned int bit)
bio               239 include/linux/bio.h 	bio->bi_flags |= (1U << bit);
bio               242 include/linux/bio.h static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio               244 include/linux/bio.h 	bio->bi_flags &= ~(1U << bit);
bio               247 include/linux/bio.h static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
bio               249 include/linux/bio.h 	*bv = bio_iovec(bio);
bio               252 include/linux/bio.h static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bio               254 include/linux/bio.h 	struct bvec_iter iter = bio->bi_iter;
bio               257 include/linux/bio.h 	if (unlikely(!bio_multiple_segments(bio))) {
bio               258 include/linux/bio.h 		*bv = bio_iovec(bio);
bio               262 include/linux/bio.h 	bio_advance_iter(bio, &iter, iter.bi_size);
bio               269 include/linux/bio.h 	*bv = bio->bi_io_vec[idx];
bio               279 include/linux/bio.h static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
bio               281 include/linux/bio.h 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
bio               282 include/linux/bio.h 	return bio->bi_io_vec;
bio               285 include/linux/bio.h static inline struct page *bio_first_page_all(struct bio *bio)
bio               287 include/linux/bio.h 	return bio_first_bvec_all(bio)->bv_page;
bio               290 include/linux/bio.h static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
bio               292 include/linux/bio.h 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
bio               293 include/linux/bio.h 	return &bio->bi_io_vec[bio->bi_vcnt - 1];
bio               308 include/linux/bio.h 	struct bio		*bip_bio;	/* parent bio */
bio               327 include/linux/bio.h static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
bio               329 include/linux/bio.h 	if (bio->bi_opf & REQ_INTEGRITY)
bio               330 include/linux/bio.h 		return bio->bi_integrity;
bio               335 include/linux/bio.h static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
bio               337 include/linux/bio.h 	struct bio_integrity_payload *bip = bio_integrity(bio);
bio               358 include/linux/bio.h extern void bio_trim(struct bio *bio, int offset, int size);
bio               359 include/linux/bio.h extern struct bio *bio_split(struct bio *bio, int sectors,
bio               372 include/linux/bio.h static inline struct bio *bio_next_split(struct bio *bio, int sectors,
bio               375 include/linux/bio.h 	if (sectors >= bio_sectors(bio))
bio               376 include/linux/bio.h 		return bio;
bio               378 include/linux/bio.h 	return bio_split(bio, sectors, gfp, bs);
bio               390 include/linux/bio.h extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
bio               391 include/linux/bio.h extern void bio_put(struct bio *);
bio               393 include/linux/bio.h extern void __bio_clone_fast(struct bio *, struct bio *);
bio               394 include/linux/bio.h extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
bio               398 include/linux/bio.h static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
bio               403 include/linux/bio.h static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
bio               408 include/linux/bio.h extern blk_qc_t submit_bio(struct bio *);
bio               410 include/linux/bio.h extern void bio_endio(struct bio *);
bio               412 include/linux/bio.h static inline void bio_io_error(struct bio *bio)
bio               414 include/linux/bio.h 	bio->bi_status = BLK_STS_IOERR;
bio               415 include/linux/bio.h 	bio_endio(bio);
bio               418 include/linux/bio.h static inline void bio_wouldblock_error(struct bio *bio)
bio               420 include/linux/bio.h 	bio->bi_status = BLK_STS_AGAIN;
bio               421 include/linux/bio.h 	bio_endio(bio);
bio               426 include/linux/bio.h extern int submit_bio_wait(struct bio *bio);
bio               427 include/linux/bio.h extern void bio_advance(struct bio *, unsigned);
bio               429 include/linux/bio.h extern void bio_init(struct bio *bio, struct bio_vec *table,
bio               431 include/linux/bio.h extern void bio_uninit(struct bio *);
bio               432 include/linux/bio.h extern void bio_reset(struct bio *);
bio               433 include/linux/bio.h void bio_chain(struct bio *, struct bio *);
bio               435 include/linux/bio.h extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
bio               436 include/linux/bio.h extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
bio               438 include/linux/bio.h bool __bio_try_merge_page(struct bio *bio, struct page *page,
bio               440 include/linux/bio.h void __bio_add_page(struct bio *bio, struct page *page,
bio               442 include/linux/bio.h int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
bio               443 include/linux/bio.h void bio_release_pages(struct bio *bio, bool mark_dirty);
bio               445 include/linux/bio.h extern struct bio *bio_map_user_iov(struct request_queue *,
bio               447 include/linux/bio.h extern void bio_unmap_user(struct bio *);
bio               448 include/linux/bio.h extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
bio               450 include/linux/bio.h extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
bio               452 include/linux/bio.h extern void bio_set_pages_dirty(struct bio *bio);
bio               453 include/linux/bio.h extern void bio_check_pages_dirty(struct bio *bio);
bio               461 include/linux/bio.h extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
bio               462 include/linux/bio.h 			       struct bio *src, struct bvec_iter *src_iter);
bio               463 include/linux/bio.h extern void bio_copy_data(struct bio *dst, struct bio *src);
bio               464 include/linux/bio.h extern void bio_list_copy_data(struct bio *dst, struct bio *src);
bio               465 include/linux/bio.h extern void bio_free_pages(struct bio *bio);
bio               467 include/linux/bio.h extern struct bio *bio_copy_user_iov(struct request_queue *,
bio               471 include/linux/bio.h extern int bio_uncopy_user(struct bio *);
bio               472 include/linux/bio.h void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
bio               473 include/linux/bio.h void bio_truncate(struct bio *bio, unsigned new_size);
bio               475 include/linux/bio.h static inline void zero_fill_bio(struct bio *bio)
bio               477 include/linux/bio.h 	zero_fill_bio_iter(bio, bio->bi_iter);
bio               483 include/linux/bio.h extern const char *bio_devname(struct bio *bio, char *buffer);
bio               485 include/linux/bio.h #define bio_set_dev(bio, bdev) 			\
bio               487 include/linux/bio.h 	if ((bio)->bi_disk != (bdev)->bd_disk)	\
bio               488 include/linux/bio.h 		bio_clear_flag(bio, BIO_THROTTLED);\
bio               489 include/linux/bio.h 	(bio)->bi_disk = (bdev)->bd_disk;	\
bio               490 include/linux/bio.h 	(bio)->bi_partno = (bdev)->bd_partno;	\
bio               491 include/linux/bio.h 	bio_associate_blkg(bio);		\
bio               501 include/linux/bio.h #define bio_dev(bio) \
bio               502 include/linux/bio.h 	disk_devt((bio)->bi_disk)
bio               505 include/linux/bio.h void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
bio               507 include/linux/bio.h static inline void bio_associate_blkg_from_page(struct bio *bio,
bio               512 include/linux/bio.h void bio_disassociate_blkg(struct bio *bio);
bio               513 include/linux/bio.h void bio_associate_blkg(struct bio *bio);
bio               514 include/linux/bio.h void bio_associate_blkg_from_css(struct bio *bio,
bio               516 include/linux/bio.h void bio_clone_blkg_association(struct bio *dst, struct bio *src);
bio               518 include/linux/bio.h static inline void bio_disassociate_blkg(struct bio *bio) { }
bio               519 include/linux/bio.h static inline void bio_associate_blkg(struct bio *bio) { }
bio               520 include/linux/bio.h static inline void bio_associate_blkg_from_css(struct bio *bio,
bio               523 include/linux/bio.h static inline void bio_clone_blkg_association(struct bio *dst,
bio               524 include/linux/bio.h 					      struct bio *src) { }
bio               576 include/linux/bio.h 	struct bio *head;
bio               577 include/linux/bio.h 	struct bio *tail;
bio               592 include/linux/bio.h #define bio_list_for_each(bio, bl) \
bio               593 include/linux/bio.h 	for (bio = (bl)->head; bio; bio = bio->bi_next)
bio               598 include/linux/bio.h 	struct bio *bio;
bio               600 include/linux/bio.h 	bio_list_for_each(bio, bl)
bio               606 include/linux/bio.h static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
bio               608 include/linux/bio.h 	bio->bi_next = NULL;
bio               611 include/linux/bio.h 		bl->tail->bi_next = bio;
bio               613 include/linux/bio.h 		bl->head = bio;
bio               615 include/linux/bio.h 	bl->tail = bio;
bio               618 include/linux/bio.h static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
bio               620 include/linux/bio.h 	bio->bi_next = bl->head;
bio               622 include/linux/bio.h 	bl->head = bio;
bio               625 include/linux/bio.h 		bl->tail = bio;
bio               655 include/linux/bio.h static inline struct bio *bio_list_peek(struct bio_list *bl)
bio               660 include/linux/bio.h static inline struct bio *bio_list_pop(struct bio_list *bl)
bio               662 include/linux/bio.h 	struct bio *bio = bl->head;
bio               664 include/linux/bio.h 	if (bio) {
bio               669 include/linux/bio.h 		bio->bi_next = NULL;
bio               672 include/linux/bio.h 	return bio;
bio               675 include/linux/bio.h static inline struct bio *bio_list_get(struct bio_list *bl)
bio               677 include/linux/bio.h 	struct bio *bio = bl->head;
bio               681 include/linux/bio.h 	return bio;
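
The bio_list above is an intrusive FIFO threaded through bi_next, commonly used to defer or batch submissions without extra allocation. Typical drain loop; note that bio_list_pop() clears bi_next on the detached bio:

        static void drain_deferred_bios(struct bio_list *bl)
        {
                struct bio *bio;

                while ((bio = bio_list_pop(bl)))
                        submit_bio(bio);
        }
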
bio               688 include/linux/bio.h static inline void bio_inc_remaining(struct bio *bio)
bio               690 include/linux/bio.h 	bio_set_flag(bio, BIO_CHAIN);
bio               692 include/linux/bio.h 	atomic_inc(&bio->__bi_remaining);
bio               750 include/linux/bio.h extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
bio               751 include/linux/bio.h extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
bio               752 include/linux/bio.h extern bool bio_integrity_prep(struct bio *);
bio               753 include/linux/bio.h extern void bio_integrity_advance(struct bio *, unsigned int);
bio               754 include/linux/bio.h extern void bio_integrity_trim(struct bio *);
bio               755 include/linux/bio.h extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
bio               762 include/linux/bio.h static inline void *bio_integrity(struct bio *bio)
bio               777 include/linux/bio.h static inline bool bio_integrity_prep(struct bio *bio)
bio               782 include/linux/bio.h static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
bio               788 include/linux/bio.h static inline void bio_integrity_advance(struct bio *bio,
bio               794 include/linux/bio.h static inline void bio_integrity_trim(struct bio *bio)
bio               804 include/linux/bio.h static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
bio               809 include/linux/bio.h static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
bio               815 include/linux/bio.h static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
bio               830 include/linux/bio.h static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio               832 include/linux/bio.h 	bio->bi_opf |= REQ_HIPRI;
bio               834 include/linux/bio.h 		bio->bi_opf |= REQ_NOWAIT;
bio               278 include/linux/blk-cgroup.h static inline struct blkcg *__bio_blkcg(struct bio *bio)
bio               280 include/linux/blk-cgroup.h 	if (bio && bio->bi_blkg)
bio               281 include/linux/blk-cgroup.h 		return bio->bi_blkg->blkcg;
bio               293 include/linux/blk-cgroup.h static inline struct blkcg *bio_blkcg(struct bio *bio)
bio               295 include/linux/blk-cgroup.h 	if (bio && bio->bi_blkg)
bio               296 include/linux/blk-cgroup.h 		return bio->bi_blkg->blkcg;
bio               331 include/linux/blk-cgroup.h static inline bool bio_issue_as_root_blkg(struct bio *bio)
bio               333 include/linux/blk-cgroup.h 	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
bio               705 include/linux/blk-cgroup.h 			   struct bio *bio);
bio               708 include/linux/blk-cgroup.h 				  struct bio *bio) { return false; }
bio               711 include/linux/blk-cgroup.h bool __blkcg_punt_bio_submit(struct bio *bio);
bio               713 include/linux/blk-cgroup.h static inline bool blkcg_punt_bio_submit(struct bio *bio)
bio               715 include/linux/blk-cgroup.h 	if (bio->bi_opf & REQ_CGROUP_PUNT)
bio               716 include/linux/blk-cgroup.h 		return __blkcg_punt_bio_submit(bio);
bio               721 include/linux/blk-cgroup.h static inline void blkcg_bio_issue_init(struct bio *bio)
bio               723 include/linux/blk-cgroup.h 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
bio               727 include/linux/blk-cgroup.h 					 struct bio *bio)
bio               734 include/linux/blk-cgroup.h 	if (!bio->bi_blkg) {
bio               739 include/linux/blk-cgroup.h 			  bio_devname(bio, b));
bio               740 include/linux/blk-cgroup.h 		bio_associate_blkg(bio);
bio               743 include/linux/blk-cgroup.h 	blkg = bio->bi_blkg;
bio               745 include/linux/blk-cgroup.h 	throtl = blk_throtl_bio(q, blkg, bio);
bio               753 include/linux/blk-cgroup.h 		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
bio               754 include/linux/blk-cgroup.h 			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
bio               755 include/linux/blk-cgroup.h 					bio->bi_iter.bi_size);
bio               756 include/linux/blk-cgroup.h 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
bio               759 include/linux/blk-cgroup.h 	blkcg_bio_issue_init(bio);
bio               857 include/linux/blk-cgroup.h static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
bio               858 include/linux/blk-cgroup.h static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
bio               867 include/linux/blk-cgroup.h static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
bio               868 include/linux/blk-cgroup.h static inline void blkcg_bio_issue_init(struct bio *bio) { }
bio               870 include/linux/blk-cgroup.h 					 struct bio *bio) { return true; }
bio               316 include/linux/blk-mq.h 			   struct bio *bio, unsigned int nr_segs);
bio                14 include/linux/blk_types.h struct bio;
bio                20 include/linux/blk_types.h typedef void (bio_end_io_t) (struct bio *);
bio               145 include/linux/blk_types.h 	struct bio		*bi_next;	/* request queue link */
bio               204 include/linux/blk_types.h #define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
bio               244 include/linux/blk_types.h #define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
bio               378 include/linux/blk_types.h #define bio_op(bio) \
bio               379 include/linux/blk_types.h 	((bio)->bi_opf & REQ_OP_MASK)
bio               384 include/linux/blk_types.h static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
bio               387 include/linux/blk_types.h 	bio->bi_opf = op | op_flags;
bio               147 include/linux/blkdev.h 	struct bio *bio;
bio               148 include/linux/blkdev.h 	struct bio *biotail;
bio               274 include/linux/blkdev.h static inline bool bio_is_passthrough(struct bio *bio)
bio               276 include/linux/blkdev.h 	unsigned op = bio_op(bio);
bio               290 include/linux/blkdev.h typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
bio               763 include/linux/blkdev.h static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
bio               815 include/linux/blkdev.h 	struct bio *bio;
bio               822 include/linux/blkdev.h 	if ((rq->bio))			\
bio               823 include/linux/blkdev.h 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
bio               826 include/linux/blkdev.h 	__rq_for_each_bio(_iter.bio, _rq)			\
bio               827 include/linux/blkdev.h 		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
bio               830 include/linux/blkdev.h 	__rq_for_each_bio(_iter.bio, _rq)			\
bio               831 include/linux/blkdev.h 		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
bio               834 include/linux/blkdev.h 		(_iter.bio->bi_next == NULL &&			\
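
The request iterators above let drivers walk every bio of a struct request, and every segment of each bio, without touching the bios directly. A sketch of a segment-counting helper in that style:

        #include <linux/blkdev.h>

        static unsigned int rq_count_segments(struct request *rq)
        {
                struct req_iterator iter;
                struct bio_vec bvec;
                unsigned int n = 0;

                rq_for_each_segment(bvec, rq, iter)
                        n++;
                return n;
        }
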
bio               850 include/linux/blkdev.h extern blk_qc_t generic_make_request(struct bio *bio);
bio               851 include/linux/blkdev.h extern blk_qc_t direct_make_request(struct bio *bio);
bio               859 include/linux/blkdev.h 			     int (*bio_ctr)(struct bio *, struct bio *, void *),
bio               864 include/linux/blkdev.h extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
bio               865 include/linux/blkdev.h extern void blk_queue_split(struct request_queue *, struct bio **);
bio               880 include/linux/blkdev.h extern int blk_rq_unmap_user(struct bio *);
bio               937 include/linux/blkdev.h 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
bio               990 include/linux/blkdev.h 	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
bio              1043 include/linux/blkdev.h 	struct bio *bio;
bio              1045 include/linux/blkdev.h 	__rq_for_each_bio(bio, rq)
bio              1220 include/linux/blkdev.h 		struct bio **biop);
bio              1226 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
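The struct bio **biop variants chain discard bios so a caller can batch several ranges and submit once; the plain wrappers build, submit, and wait internally. A sketch of both forms, with bdev, start, and nr_sects assumed from the caller:

    /* synchronous wrapper: submits and waits internally */
    int err = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);

    /* chained form: accumulate onto one bio chain, submit once */
    struct bio *bio = NULL;

    err = __blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0, &bio);
    if (!err && bio) {
    	err = submit_bio_wait(bio);
    	bio_put(bio);
    }
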
bio              1541 include/linux/blkdev.h extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
bio              1543 include/linux/blkdev.h extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
bio              1547 include/linux/blkdev.h 				    struct bio *);
bio              1612 include/linux/blkdev.h 	return rq->bio->bi_integrity->bip_vec;
bio              1617 include/linux/blkdev.h struct bio;
bio              1627 include/linux/blkdev.h 					    struct bio *b)
bio              1632 include/linux/blkdev.h 					  struct bio *b,
bio              1672 include/linux/blkdev.h 					   struct bio *b)
bio                60 include/linux/bsg-lib.h 	struct bio *bidi_bio;
bio                88 include/linux/ceph/messenger.h 	struct bio *bio;
bio                99 include/linux/ceph/messenger.h 		bio_advance_iter((it)->bio, &(it)->iter, __cur_n);	      \
bio               100 include/linux/ceph/messenger.h 		if (!(it)->iter.bi_size && (it)->bio->bi_next) {	      \
bio               102 include/linux/ceph/messenger.h 			(it)->bio = (it)->bio->bi_next;			      \
bio               103 include/linux/ceph/messenger.h 			(it)->iter = (it)->bio->bi_iter;		      \
bio               125 include/linux/ceph/messenger.h 		__bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
bio                60 include/linux/device-mapper.h typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
bio                77 include/linux/device-mapper.h 			    struct bio *bio, blk_status_t *error);
bio               327 include/linux/device-mapper.h void *dm_per_bio_data(struct bio *bio, size_t data_size);
bio               328 include/linux/device-mapper.h struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
bio               329 include/linux/device-mapper.h unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
bio               424 include/linux/device-mapper.h void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
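dm_map_fn is the per-bio hook a device-mapper target implements; returning DM_MAPIO_REMAPPED tells the core to resubmit the now-retargeted bio. A sketch modelled on dm-linear; struct passthru_ctx and its fields are hypothetical per-target state kept in ti->private:

    static int passthru_map(struct dm_target *ti, struct bio *bio)
    {
    	struct passthru_ctx *pc = ti->private;

    	bio_set_dev(bio, pc->dev->bdev);
    	if (bio_sectors(bio))
    		bio->bi_iter.bi_sector = pc->start +
    			dm_target_offset(ti, bio->bi_iter.bi_sector);
    	return DM_MAPIO_REMAPPED;           /* core resubmits the bio */
    }
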
bio                44 include/linux/dm-io.h 		struct bio *bio;
bio                50 include/linux/dm-region-hash.h region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
bio                79 include/linux/dm-region-hash.h void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
bio                81 include/linux/dm-region-hash.h void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
bio                36 include/linux/elevator.h 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bio                37 include/linux/elevator.h 	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
bio                38 include/linux/elevator.h 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
bio                42 include/linux/elevator.h 	void (*prepare_request)(struct request *, struct bio *bio);
bio               115 include/linux/elevator.h 		struct bio *);
bio               136 include/linux/elevator.h extern bool elv_bio_merge_ok(struct request *, struct bio *);
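These are the bio-facing hooks of elevator_mq_ops that a blk-mq I/O scheduler wires up. A sketch of the registration shape; the mysched_* handlers are hypothetical:

    static struct elevator_type mysched = {
    	.ops = {
    		.allow_merge     = mysched_allow_merge,
    		.bio_merge       = mysched_bio_merge,
    		.request_merge   = mysched_request_merge,
    		.prepare_request = mysched_prepare_request,
    	},
    	.elevator_name  = "mysched",
    	.elevator_owner = THIS_MODULE,
    };

Calling elv_register(&mysched) from module init would then make the scheduler selectable per queue.
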
bio                49 include/linux/fs.h struct bio;
bio              3152 include/linux/fs.h typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
bio              3163 include/linux/fs.h void dio_end_io(struct bio *bio);
bio                71 include/linux/fscrypt.h 			struct bio *bio;
bio               246 include/linux/fscrypt.h extern void fscrypt_decrypt_bio(struct bio *);
bio               248 include/linux/fscrypt.h 					struct bio *bio);
bio               483 include/linux/fscrypt.h static inline void fscrypt_decrypt_bio(struct bio *bio)
bio               488 include/linux/fscrypt.h 					       struct bio *bio)
bio               134 include/linux/fsverity.h extern void fsverity_verify_bio(struct bio *bio);
bio               184 include/linux/fsverity.h static inline void fsverity_verify_bio(struct bio *bio)
bio               133 include/linux/libnvdimm.h 	int (*flush)(struct nd_region *nd_region, struct bio *bio);
bio               264 include/linux/libnvdimm.h int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
bio               285 include/linux/lightnvm.h 	struct bio *bio;
bio               634 include/linux/lightnvm.h typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
bio               117 include/linux/pktcdvd.h 	struct bio		*w_bio;		/* The bio we will send to the real CD */
bio               132 include/linux/pktcdvd.h 	struct bio		*r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
bio               145 include/linux/pktcdvd.h 	struct bio		*bio;
bio               150 include/linux/pktcdvd.h 	struct bio		*bio;		/* Original read request bio */
bio                19 include/linux/swap.h struct bio;
bio               390 include/linux/swap.h extern void end_swap_bio_write(struct bio *bio);
bio                16 include/linux/writeback.h struct bio;
bio               281 include/linux/writeback.h static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
bio               290 include/linux/writeback.h 		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
bio               319 include/linux/writeback.h static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
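wbc_init_bio() attaches a writeback bio to the blkcg of the inode being written back; the second definition above is the !CONFIG_CGROUP_WRITEBACK no-op. A sketch of the usual writepage-path pairing in kernels of this vintage, where wbc_account_io() is the byte-charging companion:

    wbc_init_bio(wbc, bio);                 /* bind bio to inode's blkcg */
    wbc_account_io(wbc, page, PAGE_SIZE);   /* charge bytes for ownership */
    submit_bio(bio);
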
bio                11 include/trace/events/bcache.h 	TP_PROTO(struct bcache_device *d, struct bio *bio),
bio                12 include/trace/events/bcache.h 	TP_ARGS(d, bio),
bio                25 include/trace/events/bcache.h 		__entry->dev		= bio_dev(bio);
bio                28 include/trace/events/bcache.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio                29 include/trace/events/bcache.h 		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
bio                30 include/trace/events/bcache.h 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
bio                31 include/trace/events/bcache.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio                81 include/trace/events/bcache.h 	TP_PROTO(struct bcache_device *d, struct bio *bio),
bio                82 include/trace/events/bcache.h 	TP_ARGS(d, bio)
bio                86 include/trace/events/bcache.h 	TP_PROTO(struct bcache_device *d, struct bio *bio),
bio                87 include/trace/events/bcache.h 	TP_ARGS(d, bio)
bio                91 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio),
bio                92 include/trace/events/bcache.h 	TP_ARGS(bio),
bio               102 include/trace/events/bcache.h 		__entry->dev		= bio_dev(bio);
bio               103 include/trace/events/bcache.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               104 include/trace/events/bcache.h 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
bio               105 include/trace/events/bcache.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               114 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio),
bio               115 include/trace/events/bcache.h 	TP_ARGS(bio)
bio               119 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio),
bio               120 include/trace/events/bcache.h 	TP_ARGS(bio)
bio               124 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio, bool hit, bool bypass),
bio               125 include/trace/events/bcache.h 	TP_ARGS(bio, hit, bypass),
bio               137 include/trace/events/bcache.h 		__entry->dev		= bio_dev(bio);
bio               138 include/trace/events/bcache.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               139 include/trace/events/bcache.h 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
bio               140 include/trace/events/bcache.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               152 include/trace/events/bcache.h 	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
bio               154 include/trace/events/bcache.h 	TP_ARGS(c, inode, bio, writeback, bypass),
bio               169 include/trace/events/bcache.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               170 include/trace/events/bcache.h 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
bio               171 include/trace/events/bcache.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               183 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio),
bio               184 include/trace/events/bcache.h 	TP_ARGS(bio)
bio               225 include/trace/events/bcache.h 	TP_PROTO(struct bio *bio, u32 keys),
bio               226 include/trace/events/bcache.h 	TP_ARGS(bio, keys),
bio               237 include/trace/events/bcache.h 		__entry->dev		= bio_dev(bio);
bio               238 include/trace/events/bcache.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               239 include/trace/events/bcache.h 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
bio               241 include/trace/events/bcache.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
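Each TP_fast_assign above snapshots the same trio of bio fields: device, start sector, and size in 512-byte sectors (the >> 9 converts bi_size from bytes). A sketch of a minimal tracepoint following that convention; the mydrv event name is hypothetical and the usual trace-header boilerplate is omitted:

    TRACE_EVENT(mydrv_bio_queue,
    	TP_PROTO(struct bio *bio),
    	TP_ARGS(bio),
    	TP_STRUCT__entry(
    		__field(dev_t,		dev)
    		__field(sector_t,	sector)
    		__field(unsigned int,	nr_sector)
    	),
    	TP_fast_assign(
    		__entry->dev		= bio_dev(bio);
    		__entry->sector		= bio->bi_iter.bi_sector;
    		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
    	),
    	TP_printk("%d,%d sector %llu + %u",
    		  MAJOR(__entry->dev), MINOR(__entry->dev),
    		  (unsigned long long)__entry->sector,
    		  __entry->nr_sector)
    );
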
bio               227 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio),
bio               229 include/trace/events/block.h 	TP_ARGS(q, bio),
bio               240 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               241 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               242 include/trace/events/block.h 		__entry->nr_sector	= bio_sectors(bio);
bio               243 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               264 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
bio               266 include/trace/events/block.h 	TP_ARGS(q, bio, error),
bio               277 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               278 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               279 include/trace/events/block.h 		__entry->nr_sector	= bio_sectors(bio);
bio               281 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               292 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
bio               294 include/trace/events/block.h 	TP_ARGS(q, rq, bio),
bio               305 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               306 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               307 include/trace/events/block.h 		__entry->nr_sector	= bio_sectors(bio);
bio               308 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               329 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
bio               331 include/trace/events/block.h 	TP_ARGS(q, rq, bio)
bio               345 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
bio               347 include/trace/events/block.h 	TP_ARGS(q, rq, bio)
bio               359 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio),
bio               361 include/trace/events/block.h 	TP_ARGS(q, bio),
bio               372 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               373 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               374 include/trace/events/block.h 		__entry->nr_sector	= bio_sectors(bio);
bio               375 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               387 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
bio               389 include/trace/events/block.h 	TP_ARGS(q, bio, rw),
bio               400 include/trace/events/block.h 		__entry->dev		= bio ? bio_dev(bio) : 0;
bio               401 include/trace/events/block.h 		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
bio               402 include/trace/events/block.h 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
bio               404 include/trace/events/block.h 			      bio ? bio->bi_opf : 0, __entry->nr_sector);
bio               425 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
bio               427 include/trace/events/block.h 	TP_ARGS(q, bio, rw)
bio               443 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
bio               445 include/trace/events/block.h 	TP_ARGS(q, bio, rw)
bio               521 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio,
bio               524 include/trace/events/block.h 	TP_ARGS(q, bio, new_sector),
bio               535 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               536 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               538 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio               561 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
bio               564 include/trace/events/block.h 	TP_ARGS(q, bio, dev, from),
bio               576 include/trace/events/block.h 		__entry->dev		= bio_dev(bio);
bio               577 include/trace/events/block.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio               578 include/trace/events/block.h 		__entry->nr_sector	= bio_sectors(bio);
bio               581 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
bio              1064 include/trace/events/f2fs.h 	TP_PROTO(struct super_block *sb, int type, struct bio *bio),
bio              1066 include/trace/events/f2fs.h 	TP_ARGS(sb, type, bio),
bio              1080 include/trace/events/f2fs.h 		__entry->target		= bio_dev(bio);
bio              1081 include/trace/events/f2fs.h 		__entry->op		= bio_op(bio);
bio              1082 include/trace/events/f2fs.h 		__entry->op_flags	= bio->bi_opf;
bio              1084 include/trace/events/f2fs.h 		__entry->sector		= bio->bi_iter.bi_sector;
bio              1085 include/trace/events/f2fs.h 		__entry->size		= bio->bi_iter.bi_size;
bio              1099 include/trace/events/f2fs.h 	TP_PROTO(struct super_block *sb, int type, struct bio *bio),
bio              1101 include/trace/events/f2fs.h 	TP_ARGS(sb, type, bio),
bio              1103 include/trace/events/f2fs.h 	TP_CONDITION(bio)
bio              1108 include/trace/events/f2fs.h 	TP_PROTO(struct super_block *sb, int type, struct bio *bio),
bio              1110 include/trace/events/f2fs.h 	TP_ARGS(sb, type, bio),
bio              1112 include/trace/events/f2fs.h 	TP_CONDITION(bio)
bio              1117 include/trace/events/f2fs.h 	TP_PROTO(struct super_block *sb, int type, struct bio *bio),
bio              1119 include/trace/events/f2fs.h 	TP_ARGS(sb, type, bio),
bio              1121 include/trace/events/f2fs.h 	TP_CONDITION(bio)
bio              1126 include/trace/events/f2fs.h 	TP_PROTO(struct super_block *sb, int type, struct bio *bio),
bio              1128 include/trace/events/f2fs.h 	TP_ARGS(sb, type, bio),
bio              1130 include/trace/events/f2fs.h 	TP_CONDITION(bio)
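TP_CONDITION(bio) gates these DEFINE_EVENT_CONDITION instances: the event fires only for a non-NULL bio, so call sites need no NULL check of their own. For example (event name assumed from this header's conditional definitions):

    trace_f2fs_submit_write_bio(sb, type, bio);  /* silently skipped if !bio */
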
bio               238 kernel/power/swap.c static void hib_end_io(struct bio *bio)
bio               240 kernel/power/swap.c 	struct hib_bio_batch *hb = bio->bi_private;
bio               241 kernel/power/swap.c 	struct page *page = bio_first_page_all(bio);
bio               243 kernel/power/swap.c 	if (bio->bi_status) {
bio               245 kernel/power/swap.c 			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
bio               246 kernel/power/swap.c 			 (unsigned long long)bio->bi_iter.bi_sector);
bio               249 kernel/power/swap.c 	if (bio_data_dir(bio) == WRITE)
bio               255 kernel/power/swap.c 	if (bio->bi_status && !hb->error)
bio               256 kernel/power/swap.c 		hb->error = bio->bi_status;
bio               260 kernel/power/swap.c 	bio_put(bio);
bio               267 kernel/power/swap.c 	struct bio *bio;
bio               270 kernel/power/swap.c 	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
bio               271 kernel/power/swap.c 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio               272 kernel/power/swap.c 	bio_set_dev(bio, hib_resume_bdev);
bio               273 kernel/power/swap.c 	bio_set_op_attrs(bio, op, op_flags);
bio               275 kernel/power/swap.c 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio               277 kernel/power/swap.c 		       (unsigned long long)bio->bi_iter.bi_sector);
bio               278 kernel/power/swap.c 		bio_put(bio);
bio               283 kernel/power/swap.c 		bio->bi_end_io = hib_end_io;
bio               284 kernel/power/swap.c 		bio->bi_private = hb;
bio               286 kernel/power/swap.c 		submit_bio(bio);
bio               288 kernel/power/swap.c 		error = submit_bio_wait(bio);
bio               289 kernel/power/swap.c 		bio_put(bio);
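hib_end_io() above is the completion side of this file's I/O helper: with a hib_bio_batch the bio completes asynchronously and the batch counts it; without one, submit_bio_wait() makes the call synchronous. A sketch of the batched form, with names and shapes assumed from this file's static helpers and offs[]/bufs[] purely illustrative:

    struct hib_bio_batch hb;
    int i, err;

    hib_init_batch(&hb);
    for (i = 0; i < nr_pages; i++)
    	hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offs[i], bufs[i], &hb);
    err = hib_wait_io(&hb);                 /* sleeps until the batch drains */
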
bio               758 kernel/trace/blktrace.c blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
bio               767 kernel/trace/blktrace.c 	if (!bio->bi_blkg)
bio               769 kernel/trace/blktrace.c 	return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
bio               773 kernel/trace/blktrace.c blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
bio               782 kernel/trace/blktrace.c 	if (!rq->bio)
bio               785 kernel/trace/blktrace.c 	return blk_trace_bio_get_cgid(q, rq->bio);
bio               867 kernel/trace/blktrace.c static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
bio               879 kernel/trace/blktrace.c 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio               880 kernel/trace/blktrace.c 			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
bio               881 kernel/trace/blktrace.c 			blk_trace_bio_get_cgid(q, bio));
bio               886 kernel/trace/blktrace.c 				     struct request_queue *q, struct bio *bio)
bio               888 kernel/trace/blktrace.c 	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
bio               892 kernel/trace/blktrace.c 				       struct request_queue *q, struct bio *bio,
bio               895 kernel/trace/blktrace.c 	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
bio               901 kernel/trace/blktrace.c 					struct bio *bio)
bio               903 kernel/trace/blktrace.c 	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
bio               909 kernel/trace/blktrace.c 					 struct bio *bio)
bio               911 kernel/trace/blktrace.c 	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
bio               915 kernel/trace/blktrace.c 				    struct request_queue *q, struct bio *bio)
bio               917 kernel/trace/blktrace.c 	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
bio               922 kernel/trace/blktrace.c 				struct bio *bio, int rw)
bio               924 kernel/trace/blktrace.c 	if (bio)
bio               925 kernel/trace/blktrace.c 		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
bio               941 kernel/trace/blktrace.c 				  struct bio *bio, int rw)
bio               943 kernel/trace/blktrace.c 	if (bio)
bio               944 kernel/trace/blktrace.c 		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
bio               990 kernel/trace/blktrace.c 				struct request_queue *q, struct bio *bio,
bio              1000 kernel/trace/blktrace.c 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
bio              1001 kernel/trace/blktrace.c 				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
bio              1002 kernel/trace/blktrace.c 				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
bio              1003 kernel/trace/blktrace.c 				&rpdu, blk_trace_bio_get_cgid(q, bio));
bio              1022 kernel/trace/blktrace.c 				    struct request_queue *q, struct bio *bio,
bio              1036 kernel/trace/blktrace.c 	r.device_to   = cpu_to_be32(bio_dev(bio));
bio              1039 kernel/trace/blktrace.c 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio              1040 kernel/trace/blktrace.c 			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
bio              1041 kernel/trace/blktrace.c 			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
bio                29 mm/page_io.c   static struct bio *get_swap_bio(gfp_t gfp_flags,
bio                32 mm/page_io.c   	struct bio *bio;
bio                34 mm/page_io.c   	bio = bio_alloc(gfp_flags, 1);
bio                35 mm/page_io.c   	if (bio) {
bio                38 mm/page_io.c   		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
bio                39 mm/page_io.c   		bio_set_dev(bio, bdev);
bio                40 mm/page_io.c   		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio                41 mm/page_io.c   		bio->bi_end_io = end_io;
bio                43 mm/page_io.c   		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
bio                45 mm/page_io.c   	return bio;
bio                48 mm/page_io.c   void end_swap_bio_write(struct bio *bio)
bio                50 mm/page_io.c   	struct page *page = bio_first_page_all(bio);
bio                52 mm/page_io.c   	if (bio->bi_status) {
bio                64 mm/page_io.c   			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
bio                65 mm/page_io.c   			 (unsigned long long)bio->bi_iter.bi_sector);
bio                69 mm/page_io.c   	bio_put(bio);
bio               120 mm/page_io.c   static void end_swap_bio_read(struct bio *bio)
bio               122 mm/page_io.c   	struct page *page = bio_first_page_all(bio);
bio               123 mm/page_io.c   	struct task_struct *waiter = bio->bi_private;
bio               125 mm/page_io.c   	if (bio->bi_status) {
bio               129 mm/page_io.c   			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
bio               130 mm/page_io.c   			 (unsigned long long)bio->bi_iter.bi_sector);
bio               138 mm/page_io.c   	WRITE_ONCE(bio->bi_private, NULL);
bio               139 mm/page_io.c   	bio_put(bio);
bio               280 mm/page_io.c   	struct bio *bio;
bio               333 mm/page_io.c   	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
bio               334 mm/page_io.c   	if (bio == NULL) {
bio               340 mm/page_io.c   	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
bio               341 mm/page_io.c   	bio_associate_blkg_from_page(bio, page);
bio               345 mm/page_io.c   	submit_bio(bio);
bio               352 mm/page_io.c   	struct bio *bio;
bio               389 mm/page_io.c   	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
bio               390 mm/page_io.c   	if (bio == NULL) {
bio               395 mm/page_io.c   	disk = bio->bi_disk;
bio               400 mm/page_io.c   	bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio               402 mm/page_io.c   		bio->bi_opf |= REQ_HIPRI;
bio               404 mm/page_io.c   		bio->bi_private = current;
bio               407 mm/page_io.c   	bio_get(bio);
bio               408 mm/page_io.c   	qc = submit_bio(bio);
bio               411 mm/page_io.c   		if (!READ_ONCE(bio->bi_private))
bio               418 mm/page_io.c   	bio_put(bio);
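The fragments above come from swap_readpage()'s synchronous-poll path: an extra reference keeps the bio alive past completion, end_swap_bio_read() clears bi_private when done, and the caller polls or sleeps until it observes that. The reconstructed shape of the loop, hedged from the surrounding context:

    bio_get(bio);                           /* keep bio valid after end_io */
    qc = submit_bio(bio);
    while (synchronous) {
    	set_current_state(TASK_UNINTERRUPTIBLE);
    	if (!READ_ONCE(bio->bi_private))
    		break;                      /* end_io cleared it: done */
    	if (!blk_poll(disk->queue, qc, false))
    		io_schedule();              /* no progress: yield the CPU */
    }
    __set_current_state(TASK_RUNNING);
    bio_put(bio);
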
bio               825 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
bio               826 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
bio               833 net/ceph/messenger.c 	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
bio               845 net/ceph/messenger.c 	struct page *page = bio_iter_page(it->bio, it->iter);
bio               848 net/ceph/messenger.c 	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
bio               850 net/ceph/messenger.c 	bio_advance_iter(it->bio, &it->iter, bytes);
bio               858 net/ceph/messenger.c 		       page == bio_iter_page(it->bio, it->iter)))
bio               862 net/ceph/messenger.c 		it->bio = it->bio->bi_next;
bio               863 net/ceph/messenger.c 		it->iter = it->bio->bi_iter;
bio               869 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
bio               870 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
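The cursor logic here mirrors the header macro shown earlier: advance within the current bio, and when its iterator drains, hop to bi_next and reload bi_iter. The boundary step in isolation, as a sketch:

    bio_advance_iter(it->bio, &it->iter, bytes);
    if (!it->iter.bi_size && it->bio->bi_next) {
    	it->bio  = it->bio->bi_next;        /* next bio in the chain */
    	it->iter = it->bio->bi_iter;        /* restart its iterator */
    }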