Searched refs:bio (Results 1 - 200 of 287) sorted by relevance

/linux-4.1.27/drivers/md/bcache/
io.c 14 static unsigned bch_bio_max_sectors(struct bio *bio) bch_bio_max_sectors() argument
16 struct request_queue *q = bdev_get_queue(bio->bi_bdev); bch_bio_max_sectors()
21 if (bio->bi_rw & REQ_DISCARD) bch_bio_max_sectors()
22 return min(bio_sectors(bio), q->limits.max_discard_sectors); bch_bio_max_sectors()
24 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
26 .bi_bdev = bio->bi_bdev, bio_for_each_segment()
27 .bi_sector = bio->bi_iter.bi_sector, bio_for_each_segment()
29 .bi_rw = bio->bi_rw, bio_for_each_segment()
47 ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
56 s->bio->bi_end_io = s->bi_end_io; bch_bio_submit_split_done()
57 s->bio->bi_private = s->bi_private; bch_bio_submit_split_done()
58 bio_endio_nodec(s->bio, 0); bch_bio_submit_split_done()
64 static void bch_bio_submit_split_endio(struct bio *bio, int error) bch_bio_submit_split_endio() argument
66 struct closure *cl = bio->bi_private; bch_bio_submit_split_endio()
70 clear_bit(BIO_UPTODATE, &s->bio->bi_flags); bch_bio_submit_split_endio()
72 bio_put(bio); bch_bio_submit_split_endio()
76 void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) bch_generic_make_request() argument
79 struct bio *n; bch_generic_make_request()
81 if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) bch_generic_make_request()
84 if (bio_sectors(bio) <= bch_bio_max_sectors(bio)) bch_generic_make_request()
90 s->bio = bio; bch_generic_make_request()
92 s->bi_end_io = bio->bi_end_io; bch_generic_make_request()
93 s->bi_private = bio->bi_private; bch_generic_make_request()
94 bio_get(bio); bch_generic_make_request()
97 n = bio_next_split(bio, bch_bio_max_sectors(bio), bch_generic_make_request()
105 } while (n != bio); bch_generic_make_request()
109 generic_make_request(bio); bch_generic_make_request()
114 void bch_bbio_free(struct bio *bio, struct cache_set *c) bch_bbio_free() argument
116 struct bbio *b = container_of(bio, struct bbio, bio); bch_bbio_free()
120 struct bio *bch_bbio_alloc(struct cache_set *c) bch_bbio_alloc()
123 struct bio *bio = &b->bio; bch_bbio_alloc() local
125 bio_init(bio); bch_bbio_alloc()
126 bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; bch_bbio_alloc()
127 bio->bi_max_vecs = bucket_pages(c); bch_bbio_alloc()
128 bio->bi_io_vec = bio->bi_inline_vecs; bch_bbio_alloc()
130 return bio; bch_bbio_alloc()
133 void __bch_submit_bbio(struct bio *bio, struct cache_set *c) __bch_submit_bbio() argument
135 struct bbio *b = container_of(bio, struct bbio, bio); __bch_submit_bbio()
137 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); __bch_submit_bbio()
138 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; __bch_submit_bbio()
141 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); __bch_submit_bbio()
144 void bch_submit_bbio(struct bio *bio, struct cache_set *c, bch_submit_bbio() argument
147 struct bbio *b = container_of(bio, struct bbio, bio); bch_submit_bbio()
149 __bch_submit_bbio(bio, c); bch_submit_bbio()
206 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, bch_bbio_count_io_errors() argument
209 struct bbio *b = container_of(bio, struct bbio, bio); bch_bbio_count_io_errors()
212 unsigned threshold = bio->bi_rw & REQ_WRITE bch_bbio_count_io_errors()
235 void bch_bbio_endio(struct cache_set *c, struct bio *bio, bch_bbio_endio() argument
238 struct closure *cl = bio->bi_private; bch_bbio_endio()
240 bch_bbio_count_io_errors(c, bio, error, m); bch_bbio_endio()
241 bio_put(bio); bch_bbio_endio()
movinggc.c 18 struct bbio bio; member in struct:moving_io
46 struct bio *bio = &io->bio.bio; write_moving_finish() local
50 bio_for_each_segment_all(bv, bio, i) write_moving_finish()
63 static void read_moving_endio(struct bio *bio, int error) read_moving_endio() argument
65 struct bbio *b = container_of(bio, struct bbio, bio); read_moving_endio()
66 struct moving_io *io = container_of(bio->bi_private, read_moving_endio()
76 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); read_moving_endio()
81 struct bio *bio = &io->bio.bio; moving_init() local
83 bio_init(bio); moving_init()
84 bio_get(bio); moving_init()
85 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); moving_init()
87 bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; moving_init()
88 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), moving_init()
90 bio->bi_private = &io->cl; moving_init()
91 bio->bi_io_vec = bio->bi_inline_vecs; moving_init()
92 bch_bio_map(bio, NULL); moving_init()
103 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); write_moving()
105 op->bio = &io->bio.bio; write_moving()
122 struct bio *bio = &io->bio.bio; read_moving_submit() local
124 bch_submit_bbio(bio, io->op.c, &io->w->key, 0); read_moving_submit()
133 struct bio *bio; read_moving() local
164 bio = &io->bio.bio; read_moving()
166 bio->bi_rw = READ; read_moving()
167 bio->bi_end_io = read_moving_endio; read_moving()
169 if (bio_alloc_pages(bio, GFP_KERNEL)) read_moving()
request.c 28 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) cache_mode() argument
33 static bool verify(struct cached_dev *dc, struct bio *bio) verify() argument
38 static void bio_csum(struct bio *bio, struct bkey *k) bio_csum() argument
44 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
118 struct bio *bio = op->bio; bch_data_invalidate() local
121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); bch_data_invalidate()
123 while (bio_sectors(bio)) { bch_data_invalidate()
124 unsigned sectors = min(bio_sectors(bio), bch_data_invalidate()
130 bio->bi_iter.bi_sector += sectors; bch_data_invalidate()
131 bio->bi_iter.bi_size -= sectors << 9; bch_data_invalidate()
134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); bch_data_invalidate()
138 bio_put(bio); bch_data_invalidate()
173 static void bch_data_insert_endio(struct bio *bio, int error) bch_data_insert_endio() argument
175 struct closure *cl = bio->bi_private; bch_data_insert_endio()
188 bch_bbio_endio(op->c, bio, error, "writing data to cache"); bch_data_insert_endio()
194 struct bio *bio = op->bio, *n; bch_data_insert_start() local
196 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { bch_data_insert_start()
208 bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA); bch_data_insert_start()
224 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); bch_data_insert_start()
226 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), bch_data_insert_start()
231 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); bch_data_insert_start()
253 } while (n != bio); bch_data_insert_start()
282 bio_put(bio); bch_data_insert_start()
314 trace_bcache_write(op->c, op->inode, op->bio, bch_data_insert()
318 bio_get(op->bio); bch_data_insert()
365 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) check_should_bypass() argument
368 unsigned mode = cache_mode(dc, bio); check_should_bypass()
375 (bio->bi_rw & REQ_DISCARD)) check_should_bypass()
380 (bio->bi_rw & REQ_WRITE))) check_should_bypass()
383 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || check_should_bypass()
384 bio_sectors(bio) & (c->sb.block_size - 1)) { check_should_bypass()
401 (bio->bi_rw & REQ_WRITE) && check_should_bypass()
402 (bio->bi_rw & REQ_SYNC)) check_should_bypass()
407 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) check_should_bypass()
408 if (i->last == bio->bi_iter.bi_sector && check_should_bypass()
417 if (i->sequential + bio->bi_iter.bi_size > i->sequential) check_should_bypass()
418 i->sequential += bio->bi_iter.bi_size; check_should_bypass()
420 i->last = bio_end_sector(bio); check_should_bypass()
435 trace_bcache_bypass_sequential(bio); check_should_bypass()
440 trace_bcache_bypass_congested(bio); check_should_bypass()
445 bch_rescale_priorities(c, bio_sectors(bio)); check_should_bypass()
448 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); check_should_bypass()
458 struct bbio bio; member in struct:search
459 struct bio *orig_bio;
460 struct bio *cache_miss;
474 static void bch_cache_read_endio(struct bio *bio, int error) bch_cache_read_endio() argument
476 struct bbio *b = container_of(bio, struct bbio, bio); bch_cache_read_endio()
477 struct closure *cl = bio->bi_private; bch_cache_read_endio()
481 * If the bucket was reused while our bio was in flight, we might have bch_cache_read_endio()
495 bch_bbio_endio(s->iop.c, bio, error, "reading from cache"); bch_cache_read_endio()
500 * the middle of the bio
505 struct bio *n, *bio = &s->bio.bio; cache_lookup_fn() local
509 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) cache_lookup_fn()
513 KEY_START(k) > bio->bi_iter.bi_sector) { cache_lookup_fn()
514 unsigned bio_sectors = bio_sectors(bio); cache_lookup_fn()
517 KEY_START(k) - bio->bi_iter.bi_sector) cache_lookup_fn()
520 int ret = s->d->cache_miss(b, s, bio, sectors); cache_lookup_fn()
539 n = bio_next_split(bio, min_t(uint64_t, INT_MAX, cache_lookup_fn()
540 KEY_OFFSET(k) - bio->bi_iter.bi_sector), cache_lookup_fn()
543 bio_key = &container_of(n, struct bbio, bio)->key; cache_lookup_fn()
553 * The bucket we're reading from might be reused while our bio cache_lookup_fn()
564 return n == bio ? MAP_DONE : MAP_CONTINUE; cache_lookup_fn()
570 struct bio *bio = &s->bio.bio; cache_lookup() local
576 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup()
586 static void request_endio(struct bio *bio, int error) request_endio() argument
588 struct closure *cl = bio->bi_private; request_endio()
597 bio_put(bio); request_endio()
613 static void do_bio_hook(struct search *s, struct bio *orig_bio) do_bio_hook()
615 struct bio *bio = &s->bio.bio; do_bio_hook() local
617 bio_init(bio); do_bio_hook()
618 __bio_clone_fast(bio, orig_bio); do_bio_hook()
619 bio->bi_end_io = request_endio; do_bio_hook()
620 bio->bi_private = &s->cl; do_bio_hook()
622 atomic_set(&bio->bi_cnt, 3); do_bio_hook()
630 if (s->iop.bio) search_free()
631 bio_put(s->iop.bio); search_free()
637 static inline struct search *search_alloc(struct bio *bio, search_alloc() argument
645 do_bio_hook(s, bio); search_alloc()
647 s->orig_bio = bio; search_alloc()
651 s->write = (bio->bi_rw & REQ_WRITE) != 0; search_alloc()
656 s->iop.bio = NULL; search_alloc()
662 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; search_alloc()
688 if (s->iop.bio) { cached_dev_cache_miss_done()
692 bio_for_each_segment_all(bv, s->iop.bio, i) cached_dev_cache_miss_done()
702 struct bio *bio = &s->bio.bio; cached_dev_read_error() local
713 closure_bio_submit(bio, cl, s->d); cached_dev_read_error()
729 * to the buffers the original bio pointed to: cached_dev_read_done()
732 if (s->iop.bio) { cached_dev_read_done()
733 bio_reset(s->iop.bio); cached_dev_read_done()
734 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; cached_dev_read_done()
735 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; cached_dev_read_done()
736 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cached_dev_read_done()
737 bch_bio_map(s->iop.bio, NULL); cached_dev_read_done()
739 bio_copy_data(s->cache_miss, s->iop.bio); cached_dev_read_done()
745 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) cached_dev_read_done()
750 if (s->iop.bio && cached_dev_read_done()
770 else if (s->iop.bio || verify(dc, &s->bio.bio)) cached_dev_read_done_bh()
777 struct bio *bio, unsigned sectors) cached_dev_cache_miss()
782 struct bio *miss, *cache_bio; cached_dev_cache_miss()
785 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); cached_dev_cache_miss()
786 ret = miss == bio ? MAP_DONE : MAP_CONTINUE; cached_dev_cache_miss()
790 if (!(bio->bi_rw & REQ_RAHEAD) && cached_dev_cache_miss()
791 !(bio->bi_rw & REQ_META) && cached_dev_cache_miss()
794 bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); cached_dev_cache_miss()
796 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); cached_dev_cache_miss()
799 bio->bi_iter.bi_sector + s->insert_bio_sectors, cached_dev_cache_miss()
808 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); cached_dev_cache_miss()
811 ret = miss == bio ? MAP_DONE : -EINTR; cached_dev_cache_miss()
834 s->iop.bio = cache_bio; cached_dev_cache_miss()
870 struct bio *bio = &s->bio.bio; cached_dev_write() local
871 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); cached_dev_write()
872 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); cached_dev_write()
893 if (bio->bi_rw & REQ_DISCARD) cached_dev_write()
897 cache_mode(dc, bio), cached_dev_write()
904 s->iop.bio = s->orig_bio; cached_dev_write()
905 bio_get(s->iop.bio); cached_dev_write()
907 if (!(bio->bi_rw & REQ_DISCARD) || cached_dev_write()
909 closure_bio_submit(bio, cl, s->d); cached_dev_write()
912 s->iop.bio = bio; cached_dev_write()
914 if (bio->bi_rw & REQ_FLUSH) { cached_dev_write()
916 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, cached_dev_write()
920 flush->bi_bdev = bio->bi_bdev; cached_dev_write()
927 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); cached_dev_write()
929 closure_bio_submit(bio, cl, s->d); cached_dev_write()
939 struct bio *bio = &s->bio.bio; cached_dev_nodata() local
945 closure_bio_submit(bio, cl, s->d); cached_dev_nodata()
952 static void cached_dev_make_request(struct request_queue *q, struct bio *bio) cached_dev_make_request() argument
955 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; cached_dev_make_request()
957 int rw = bio_data_dir(bio); cached_dev_make_request()
959 generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); cached_dev_make_request()
961 bio->bi_bdev = dc->bdev; cached_dev_make_request()
962 bio->bi_iter.bi_sector += dc->sb.data_offset; cached_dev_make_request()
965 s = search_alloc(bio, d); cached_dev_make_request()
966 trace_bcache_request_start(s->d, bio); cached_dev_make_request()
968 if (!bio->bi_iter.bi_size) { cached_dev_make_request()
977 s->iop.bypass = check_should_bypass(dc, bio); cached_dev_make_request()
985 if ((bio->bi_rw & REQ_DISCARD) && cached_dev_make_request()
987 bio_endio(bio, 0); cached_dev_make_request()
989 bch_generic_make_request(bio, &d->bio_split_hook); cached_dev_make_request()
1038 struct bio *bio, unsigned sectors) flash_dev_cache_miss()
1040 unsigned bytes = min(sectors, bio_sectors(bio)) << 9; flash_dev_cache_miss()
1042 swap(bio->bi_iter.bi_size, bytes); flash_dev_cache_miss()
1043 zero_fill_bio(bio); flash_dev_cache_miss()
1044 swap(bio->bi_iter.bi_size, bytes); flash_dev_cache_miss()
1046 bio_advance(bio, bytes); flash_dev_cache_miss()
1048 if (!bio->bi_iter.bi_size) flash_dev_cache_miss()
1064 static void flash_dev_make_request(struct request_queue *q, struct bio *bio) flash_dev_make_request() argument
1068 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; flash_dev_make_request()
1069 int rw = bio_data_dir(bio); flash_dev_make_request()
1071 generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); flash_dev_make_request()
1073 s = search_alloc(bio, d); flash_dev_make_request()
1075 bio = &s->bio.bio; flash_dev_make_request()
1077 trace_bcache_request_start(s->d, bio); flash_dev_make_request()
1079 if (!bio->bi_iter.bi_size) { flash_dev_make_request()
1089 &KEY(d->id, bio->bi_iter.bi_sector, 0), flash_dev_make_request()
1090 &KEY(d->id, bio_end_sector(bio), 0)); flash_dev_make_request()
1092 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; flash_dev_make_request()
1094 s->iop.bio = bio; flash_dev_make_request()
776 cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) cached_dev_cache_miss() argument
1037 flash_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) flash_dev_cache_miss() argument
debug.h 4 struct bio;
11 void bch_data_verify(struct cached_dev *, struct bio *);
20 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} bch_data_verify() argument
writeback.c 102 struct bio bio; member in struct:dirty_io
108 struct bio *bio = &io->bio; dirty_init() local
110 bio_init(bio); dirty_init()
112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); dirty_init()
114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; dirty_init()
115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); dirty_init()
116 bio->bi_private = w; dirty_init()
117 bio->bi_io_vec = bio->bi_inline_vecs; dirty_init()
118 bch_bio_map(bio, NULL); dirty_init()
130 struct keybuf_key *w = io->bio.bi_private; write_dirty_finish()
135 bio_for_each_segment_all(bv, &io->bio, i) write_dirty_finish()
169 static void dirty_endio(struct bio *bio, int error) dirty_endio() argument
171 struct keybuf_key *w = bio->bi_private; dirty_endio()
183 struct keybuf_key *w = io->bio.bi_private; write_dirty()
186 io->bio.bi_rw = WRITE; write_dirty()
187 io->bio.bi_iter.bi_sector = KEY_START(&w->key); write_dirty()
188 io->bio.bi_bdev = io->dc->bdev; write_dirty()
189 io->bio.bi_end_io = dirty_endio; write_dirty()
191 closure_bio_submit(&io->bio, cl, &io->dc->disk); write_dirty()
196 static void read_dirty_endio(struct bio *bio, int error) read_dirty_endio() argument
198 struct keybuf_key *w = bio->bi_private; read_dirty_endio()
204 dirty_endio(bio, error); read_dirty_endio()
211 closure_bio_submit(&io->bio, cl, &io->dc->disk); read_dirty_submit()
256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); read_dirty()
257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, read_dirty()
259 io->bio.bi_rw = READ; read_dirty()
260 io->bio.bi_end_io = read_dirty_endio; read_dirty()
262 if (bio_alloc_pages(&io->bio, GFP_KERNEL)) read_dirty()
request.h 7 struct bio *bio; member in struct:data_insert_op
debug.c 34 struct bio *bio; bch_btree_verify() local
51 bio = bch_bbio_alloc(b->c); bch_btree_verify()
52 bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; bch_btree_verify()
53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bch_btree_verify()
54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; bch_btree_verify()
55 bch_bio_map(bio, sorted); bch_btree_verify()
57 submit_bio_wait(REQ_META|READ_SYNC, bio); bch_btree_verify()
58 bch_bbio_free(bio, b->c); bch_btree_verify()
105 void bch_data_verify(struct cached_dev *dc, struct bio *bio) bch_data_verify() argument
108 struct bio *check; bch_data_verify()
113 check = bio_clone(bio, GFP_NOIO); bch_data_verify()
122 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
132 (uint64_t) bio->bi_iter.bi_sector); bio_for_each_segment()
writeback.h 42 static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, should_writeback() argument
53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, should_writeback()
54 bio_sectors(bio))) should_writeback()
60 return bio->bi_rw & REQ_SYNC || should_writeback()
journal.c 27 static void journal_read_endio(struct bio *bio, int error) journal_read_endio() argument
29 struct closure *cl = bio->bi_private; journal_read_endio()
37 struct bio *bio = &ja->bio; journal_read_bucket() local
54 bio_reset(bio); journal_read_bucket()
55 bio->bi_iter.bi_sector = bucket + offset; journal_read_bucket()
56 bio->bi_bdev = ca->bdev; journal_read_bucket()
57 bio->bi_rw = READ; journal_read_bucket()
58 bio->bi_iter.bi_size = len << 9; journal_read_bucket()
60 bio->bi_end_io = journal_read_endio; journal_read_bucket()
61 bio->bi_private = &cl; journal_read_bucket()
62 bch_bio_map(bio, data); journal_read_bucket()
64 closure_bio_submit(bio, &cl, ca); journal_read_bucket()
404 static void journal_discard_endio(struct bio *bio, int error) journal_discard_endio() argument
407 container_of(bio, struct journal_device, discard_bio); journal_discard_endio()
427 struct bio *bio = &ja->discard_bio; do_journal_discard() local
451 bio_init(bio); do_journal_discard()
452 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, do_journal_discard()
454 bio->bi_bdev = ca->bdev; do_journal_discard()
455 bio->bi_rw = REQ_WRITE|REQ_DISCARD; do_journal_discard()
456 bio->bi_max_vecs = 1; do_journal_discard()
457 bio->bi_io_vec = bio->bi_inline_vecs; do_journal_discard()
458 bio->bi_iter.bi_size = bucket_bytes(ca); do_journal_discard()
459 bio->bi_end_io = journal_discard_endio; do_journal_discard()
550 static void journal_write_endio(struct bio *bio, int error) journal_write_endio() argument
552 struct journal_write *w = bio->bi_private; journal_write_endio()
589 struct bio *bio; variable in typeref:struct:bio
620 bio = &ca->journal.bio;
624 bio_reset(bio); variable
625 bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
626 bio->bi_bdev = ca->bdev;
627 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
628 bio->bi_iter.bi_size = sectors << 9;
630 bio->bi_end_io = journal_write_endio;
631 bio->bi_private = w;
632 bch_bio_map(bio, w->data);
634 trace_bcache_journal_write(bio); variable
635 bio_list_add(&list, bio);
648 while ((bio = bio_list_pop(&list)))
649 closure_bio_submit(bio, cl, c->cache[0]);
super.c 224 static void write_bdev_super_endio(struct bio *bio, int error) write_bdev_super_endio() argument
226 struct cached_dev *dc = bio->bi_private; write_bdev_super_endio()
232 static void __write_super(struct cache_sb *sb, struct bio *bio) __write_super() argument
234 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); __write_super()
237 bio->bi_iter.bi_sector = SB_SECTOR; __write_super()
238 bio->bi_rw = REQ_SYNC|REQ_META; __write_super()
239 bio->bi_iter.bi_size = SB_SIZE; __write_super()
240 bch_bio_map(bio, NULL); __write_super()
264 submit_bio(REQ_WRITE, bio); __write_super()
277 struct bio *bio = &dc->sb_bio; bch_write_bdev_super() local
282 bio_reset(bio); bch_write_bdev_super()
283 bio->bi_bdev = dc->bdev; bch_write_bdev_super()
284 bio->bi_end_io = write_bdev_super_endio; bch_write_bdev_super()
285 bio->bi_private = dc; bch_write_bdev_super()
288 __write_super(&dc->sb, bio); bch_write_bdev_super()
293 static void write_super_endio(struct bio *bio, int error) write_super_endio() argument
295 struct cache *ca = bio->bi_private; write_super_endio()
320 struct bio *bio = &ca->sb_bio; for_each_cache() local
328 bio_reset(bio); for_each_cache()
329 bio->bi_bdev = ca->bdev; for_each_cache()
330 bio->bi_end_io = write_super_endio; for_each_cache()
331 bio->bi_private = ca; for_each_cache()
334 __write_super(&ca->sb, bio); for_each_cache()
342 static void uuid_endio(struct bio *bio, int error) uuid_endio() argument
344 struct closure *cl = bio->bi_private; uuid_endio()
348 bch_bbio_free(bio, c); uuid_endio()
372 struct bio *bio = bch_bbio_alloc(c); uuid_io() local
374 bio->bi_rw = REQ_SYNC|REQ_META|rw; uuid_io()
375 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; uuid_io()
377 bio->bi_end_io = uuid_endio; uuid_io()
378 bio->bi_private = cl; uuid_io()
379 bch_bio_map(bio, c->uuids); uuid_io()
381 bch_submit_bbio(bio, c, k, i); uuid_io()
515 static void prio_endio(struct bio *bio, int error) prio_endio() argument
517 struct cache *ca = bio->bi_private; prio_endio()
520 bch_bbio_free(bio, ca->set); prio_endio()
527 struct bio *bio = bch_bbio_alloc(ca->set); prio_io() local
531 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; prio_io()
532 bio->bi_bdev = ca->bdev; prio_io()
533 bio->bi_rw = REQ_SYNC|REQ_META|rw; prio_io()
534 bio->bi_iter.bi_size = bucket_bytes(ca); prio_io()
536 bio->bi_end_io = prio_endio; prio_io()
537 bio->bi_private = ca; prio_io()
538 bch_bio_map(bio, ca->disk_buckets); prio_io()
540 closure_bio_submit(bio, &ca->prio, ca); prio_io()
814 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || bcache_device_init()
1550 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || bch_cache_set_alloc()
1847 bio_init(&ca->journal.bio); cache_alloc()
1848 ca->journal.bio.bi_max_vecs = 8; cache_alloc()
1849 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; cache_alloc()
util.c 8 #include <linux/bio.h>
225 void bch_bio_map(struct bio *bio, void *base) bch_bio_map() argument
227 size_t size = bio->bi_iter.bi_size; bch_bio_map()
228 struct bio_vec *bv = bio->bi_io_vec; bch_bio_map()
230 BUG_ON(!bio->bi_iter.bi_size); bch_bio_map()
231 BUG_ON(bio->bi_vcnt); bch_bio_map()
236 for (; size; bio->bi_vcnt++, bv++) { bch_bio_map()
btree.c 281 static void btree_node_read_endio(struct bio *bio, int error) btree_node_read_endio() argument
283 struct closure *cl = bio->bi_private; btree_node_read_endio()
291 struct bio *bio; bch_btree_node_read() local
297 bio = bch_bbio_alloc(b->c); bch_btree_node_read()
298 bio->bi_rw = REQ_META|READ_SYNC; bch_btree_node_read()
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bch_btree_node_read()
300 bio->bi_end_io = btree_node_read_endio; bch_btree_node_read()
301 bio->bi_private = &cl; bch_btree_node_read()
303 bch_bio_map(bio, b->keys.set[0].data); bch_btree_node_read()
305 bch_submit_bbio(bio, b->c, &b->key, 0); bch_btree_node_read()
308 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) bch_btree_node_read()
311 bch_bbio_free(bio, b->c); bch_btree_node_read()
352 bch_bbio_free(b->bio, b->c); __btree_node_write_done()
353 b->bio = NULL; __btree_node_write_done()
368 bio_for_each_segment_all(bv, b->bio, n) btree_node_write_done()
374 static void btree_node_write_endio(struct bio *bio, int error) btree_node_write_endio() argument
376 struct closure *cl = bio->bi_private; btree_node_write_endio()
382 bch_bbio_count_io_errors(b->c, bio, error, "writing btree"); btree_node_write_endio()
395 BUG_ON(b->bio); do_btree_node_write()
396 b->bio = bch_bbio_alloc(b->c); do_btree_node_write()
398 b->bio->bi_end_io = btree_node_write_endio; do_btree_node_write()
399 b->bio->bi_private = cl; do_btree_node_write()
400 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; do_btree_node_write()
401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); do_btree_node_write()
402 bch_bio_map(b->bio, i); do_btree_node_write()
423 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { do_btree_node_write()
428 bio_for_each_segment_all(bv, b->bio, j) do_btree_node_write()
432 bch_submit_bbio(b->bio, b->c, &k.key, 0); do_btree_node_write()
436 b->bio->bi_vcnt = 0; do_btree_node_write()
437 bch_bio_map(b->bio, i); do_btree_node_write()
439 bch_submit_bbio(b->bio, b->c, &k.key, 0); do_btree_node_write()
bcache.h 181 #include <linux/bio.h>
254 struct bio *bio; member in struct:bio_split_hook
289 struct bio *, unsigned);
311 struct bio sb_bio;
398 struct bio sb_bio;
581 * For any bio we don't skip we subtract the number of sectors from
694 struct bio bio; member in struct:bbio
870 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
872 void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
873 void bch_bbio_free(struct bio *, struct cache_set *);
874 struct bio *bch_bbio_alloc(struct cache_set *);
876 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
877 void __bch_submit_bbio(struct bio *, struct cache_set *);
878 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
journal.h 148 struct bio discard_bio;
152 struct bio bio; member in struct:journal_device
closure.h 41 * foo_endio(struct bio *bio, int error)
61 * second bio was submitted - which is almost always not what you want! More
70 * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
util.h 572 void bch_bio_map(struct bio *bio, void *base);
579 #define closure_bio_submit(bio, cl, dev) \
582 bch_generic_make_request(bio, &(dev)->bio_split_hook); \
btree.h 146 struct bio *bio; member in struct:btree
/linux-4.1.27/kernel/power/
block_io.c 10 #include <linux/bio.h>
24 * Straight from the textbook - allocate and initialize the bio.
29 struct page *page, struct bio **bio_chain) submit()
32 struct bio *bio; submit() local
34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); submit()
35 bio->bi_iter.bi_sector = sector; submit()
36 bio->bi_bdev = bdev; submit()
37 bio->bi_end_io = end_swap_bio_read; submit()
39 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { submit()
40 printk(KERN_ERR "PM: Adding page to bio failed at %llu\n", submit()
42 bio_put(bio); submit()
47 bio_get(bio); submit()
50 submit_bio(bio_rw, bio); submit()
53 bio_set_pages_dirty(bio); submit()
54 bio_put(bio); submit()
58 bio->bi_private = *bio_chain; submit()
59 *bio_chain = bio; submit()
60 submit_bio(bio_rw, bio); submit()
65 int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain) hib_bio_read_page()
71 int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain) hib_bio_write_page()
77 int hib_wait_on_bio_chain(struct bio **bio_chain) hib_wait_on_bio_chain()
79 struct bio *bio; hib_wait_on_bio_chain() local
80 struct bio *next_bio; hib_wait_on_bio_chain()
86 bio = *bio_chain; hib_wait_on_bio_chain()
87 if (bio == NULL) hib_wait_on_bio_chain()
89 while (bio) { hib_wait_on_bio_chain()
92 next_bio = bio->bi_private; hib_wait_on_bio_chain()
93 page = bio->bi_io_vec[0].bv_page; hib_wait_on_bio_chain()
98 bio_put(bio); hib_wait_on_bio_chain()
99 bio = next_bio; hib_wait_on_bio_chain()
swap.c 21 #include <linux/bio.h>
277 static int write_page(void *buf, sector_t offset, struct bio **bio_chain) write_page()
351 struct bio **bio_chain) swap_write_page()
448 struct bio *bio; save_image() local
458 bio = NULL; save_image()
464 ret = swap_write_page(handle, data_of(*snapshot), &bio); save_image()
472 err2 = hib_wait_on_bio_chain(&bio); save_image()
583 struct bio *bio; save_image_lzo() local
677 bio = NULL; save_image_lzo()
751 ret = swap_write_page(handle, page, &bio); save_image_lzo()
762 err2 = hib_wait_on_bio_chain(&bio); save_image_lzo()
922 struct bio **bio_chain) swap_read_page()
971 struct bio *bio; load_image() local
981 bio = NULL; load_image()
987 ret = swap_read_page(handle, data_of(*snapshot), &bio); load_image()
991 ret = hib_wait_on_bio_chain(&bio); load_image()
999 err2 = hib_wait_on_bio_chain(&bio); load_image()
1070 struct bio *bio; load_image_lzo() local
1193 bio = NULL; load_image_lzo()
1202 ret = swap_read_page(handle, page[ring], &bio); load_image_lzo()
1229 ret = hib_wait_on_bio_chain(&bio); load_image_lzo()
1284 ret = hib_wait_on_bio_chain(&bio); load_image_lzo()
power.h 170 struct bio **bio_chain);
172 struct bio **bio_chain);
173 extern int hib_wait_on_bio_chain(struct bio **bio_chain);
/linux-4.1.27/include/linux/
bio.h 32 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
48 * upper 16 bits of bi_rw define the io priority of this bio
51 #define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
52 #define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
54 #define bio_set_prio(bio, prio) do { \
56 (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
57 (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
83 #define bio_iter_iovec(bio, iter) \
84 bvec_iter_bvec((bio)->bi_io_vec, (iter))
86 #define bio_iter_page(bio, iter) \
87 bvec_iter_page((bio)->bi_io_vec, (iter))
88 #define bio_iter_len(bio, iter) \
89 bvec_iter_len((bio)->bi_io_vec, (iter))
90 #define bio_iter_offset(bio, iter) \
91 bvec_iter_offset((bio)->bi_io_vec, (iter))
93 #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
94 #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
95 #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
97 #define bio_multiple_segments(bio) \
98 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
99 #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
100 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
103 * Check whether this bio carries any data or not. A NULL bio is allowed.
105 static inline bool bio_has_data(struct bio *bio) bio_has_data() argument
107 if (bio && bio_has_data()
108 bio->bi_iter.bi_size && bio_has_data()
109 !(bio->bi_rw & REQ_DISCARD)) bio_has_data()
115 static inline bool bio_is_rw(struct bio *bio) bio_is_rw() argument
117 if (!bio_has_data(bio)) bio_is_rw()
120 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) bio_is_rw()
126 static inline bool bio_mergeable(struct bio *bio) bio_mergeable() argument
128 if (bio->bi_rw & REQ_NOMERGE_FLAGS) bio_mergeable()
134 static inline unsigned int bio_cur_bytes(struct bio *bio) bio_cur_bytes() argument
136 if (bio_has_data(bio)) bio_cur_bytes()
137 return bio_iovec(bio).bv_len; bio_cur_bytes()
139 return bio->bi_iter.bi_size; bio_cur_bytes()
142 static inline void *bio_data(struct bio *bio) bio_data() argument
144 if (bio_has_data(bio)) bio_data()
145 return page_address(bio_page(bio)) + bio_offset(bio); bio_data()
153 #define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
162 #define __bio_kmap_atomic(bio, iter) \
163 (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
164 bio_iter_iovec((bio), (iter)).bv_offset)
198 #define bio_io_error(bio) bio_endio((bio), -EIO)
201 * drivers should _never_ use the all version - the bio may have been split
204 #define bio_for_each_segment_all(bvl, bio, i) \
205 for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
234 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, bio_advance_iter() argument
239 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) bio_advance_iter()
242 bvec_iter_advance(bio->bi_io_vec, iter, bytes); bio_advance_iter()
245 #define __bio_for_each_segment(bvl, bio, iter, start) \
248 ((bvl = bio_iter_iovec((bio), (iter))), 1); \
249 bio_advance_iter((bio), &(iter), (bvl).bv_len))
251 #define bio_for_each_segment(bvl, bio, iter) \
252 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
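
A minimal sketch of how the iterator above is used (kernel-internal C; bio_payload_bytes is an illustrative name, not a helper that exists in the tree). Note that bvl is a struct bio_vec copied by value and iter is a private cursor, so walking the bio leaves bio->bi_iter untouched:

        #include <linux/bio.h>

        static unsigned int bio_payload_bytes(struct bio *bio)
        {
                struct bio_vec bv;
                struct bvec_iter iter;
                unsigned int bytes = 0;

                /* Visits each segment as a (page, offset, len) triple. */
                bio_for_each_segment(bv, bio, iter)
                        bytes += bv.bv_len;     /* equals bio->bi_iter.bi_size for data bios */

                return bytes;
        }
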
256 static inline unsigned bio_segments(struct bio *bio) bio_segments() argument
267 if (bio->bi_rw & REQ_DISCARD) bio_segments()
270 if (bio->bi_rw & REQ_WRITE_SAME) bio_segments()
273 bio_for_each_segment(bv, bio, iter) bio_segments()
280 * get a reference to a bio, so it won't disappear. the intended use is
283 * bio_get(bio);
284 * submit_bio(rw, bio);
285 * if (bio->bi_flags ...)
287 * bio_put(bio);
290 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
293 #define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
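
A hedged, slightly fleshed-out version of the pattern sketched in the comment above (my_endio and submit_and_check are illustrative names): the extra reference keeps the bio valid even if the I/O completes, and its endio handler runs, before submit_bio() returns.

        #include <linux/bio.h>
        #include <linux/fs.h>           /* submit_bio() is declared here in 4.1 */

        static void my_endio(struct bio *bio, int error)        /* bio_end_io_t in 4.1 */
        {
                /* ... per-I/O completion work ... */
                bio_put(bio);           /* drops the reference taken at allocation */
        }

        static void submit_and_check(int rw, struct bio *bio)
        {
                bio->bi_end_io = my_endio;

                bio_get(bio);           /* pin: completion may race with us */
                submit_bio(rw, bio);
                if (!bio_flagged(bio, BIO_UPTODATE))    /* still safe: we hold a reference */
                        pr_debug("bio already completed with an error\n");
                bio_put(bio);           /* release the extra reference */
        }
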
305 static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) bio_integrity() argument
307 if (bio->bi_rw & REQ_INTEGRITY) bio_integrity()
308 return bio->bi_integrity; bio_integrity()
314 * bio integrity payload
317 struct bio *bip_bio; /* parent bio */
334 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) bio_integrity_flagged() argument
336 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_flagged()
357 extern void bio_trim(struct bio *bio, int offset, int size);
358 extern struct bio *bio_split(struct bio *bio, int sectors,
362 * bio_next_split - get next @sectors from a bio, splitting if necessary
363 * @bio: bio to split
364 * @sectors: number of sectors to split from the front of @bio
366 * @bs: bio set to allocate from
368 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
369 * than @sectors, returns the original bio unchanged.
371 static inline struct bio *bio_next_split(struct bio *bio, int sectors, bio_next_split() argument
374 if (sectors >= bio_sectors(bio)) bio_next_split()
375 return bio; bio_next_split()
377 return bio_split(bio, sectors, gfp, bs); bio_next_split()
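
bio_next_split() drives split loops like bch_generic_make_request() and bch_data_insert_start() in the bcache hits above. A condensed sketch of that pattern (split_and_submit, max_sectors and bs are assumptions of this sketch), pairing each split-off piece with bio_chain() (declared below, implemented in block/bio.c) so the original bi_end_io only runs once every piece has completed:

        #include <linux/bio.h>
        #include <linux/blkdev.h>       /* generic_make_request() */

        static void split_and_submit(struct bio *bio, unsigned max_sectors,
                                     struct bio_set *bs)
        {
                struct bio *n;

                do {
                        /* Returns bio itself once the remainder fits in max_sectors. */
                        n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
                        if (n != bio)
                                bio_chain(n, bio);      /* parent waits for this piece too */
                        generic_make_request(n);
                } while (n != bio);
        }
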
385 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
386 extern void bio_put(struct bio *);
388 extern void __bio_clone_fast(struct bio *, struct bio *);
389 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
390 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
394 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) bio_alloc()
399 static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) bio_clone() argument
401 return bio_clone_bioset(bio, gfp_mask, fs_bio_set); bio_clone()
404 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) bio_kmalloc()
409 static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) bio_clone_kmalloc() argument
411 return bio_clone_bioset(bio, gfp_mask, NULL); bio_clone_kmalloc()
415 extern void bio_endio(struct bio *, int);
416 extern void bio_endio_nodec(struct bio *, int);
418 extern int bio_phys_segments(struct request_queue *, struct bio *);
420 extern int submit_bio_wait(int rw, struct bio *bio);
421 extern void bio_advance(struct bio *, unsigned);
423 extern void bio_init(struct bio *);
424 extern void bio_reset(struct bio *);
425 void bio_chain(struct bio *, struct bio *);
427 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
428 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
432 extern struct bio *bio_map_user_iov(struct request_queue *,
434 extern void bio_unmap_user(struct bio *);
435 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
437 extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
439 extern void bio_set_pages_dirty(struct bio *bio);
440 extern void bio_check_pages_dirty(struct bio *bio);
451 extern void bio_flush_dcache_pages(struct bio *bi);
453 static inline void bio_flush_dcache_pages(struct bio *bi) bio_flush_dcache_pages()
458 extern void bio_copy_data(struct bio *dst, struct bio *src);
459 extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
461 extern struct bio *bio_copy_user_iov(struct request_queue *,
465 extern int bio_uncopy_user(struct bio *);
466 void zero_fill_bio(struct bio *bio);
472 int bio_associate_current(struct bio *bio);
473 void bio_disassociate_task(struct bio *bio);
475 static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } bio_disassociate_task() argument
476 static inline void bio_disassociate_task(struct bio *bio) { } bio_disassociate_task() argument
520 static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter, __bio_kmap_irq() argument
523 return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags); __bio_kmap_irq()
527 #define bio_kmap_irq(bio, flags) \
528 __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
535 * member of the bio. The bio_list also caches the last list member to allow
539 struct bio *head;
540 struct bio *tail;
555 #define bio_list_for_each(bio, bl) \
556 for (bio = (bl)->head; bio; bio = bio->bi_next)
561 struct bio *bio; bio_list_size() local
563 bio_list_for_each(bio, bl) bio_list_size()
569 static inline void bio_list_add(struct bio_list *bl, struct bio *bio) bio_list_add() argument
571 bio->bi_next = NULL; bio_list_add()
574 bl->tail->bi_next = bio; bio_list_add()
576 bl->head = bio; bio_list_add()
578 bl->tail = bio; bio_list_add()
581 static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) bio_list_add_head() argument
583 bio->bi_next = bl->head; bio_list_add_head()
585 bl->head = bio; bio_list_add_head()
588 bl->tail = bio; bio_list_add_head()
618 static inline struct bio *bio_list_peek(struct bio_list *bl) bio_list_peek()
623 static inline struct bio *bio_list_pop(struct bio_list *bl) bio_list_pop()
625 struct bio *bio = bl->head; bio_list_pop() local
627 if (bio) { bio_list_pop()
632 bio->bi_next = NULL; bio_list_pop()
635 return bio; bio_list_pop()
638 static inline struct bio *bio_list_get(struct bio_list *bl) bio_list_get()
640 struct bio *bio = bl->head; bio_list_get() local
644 return bio; bio_list_get()
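
The journal.c hits above (bio_list_add() followed by a bio_list_pop() drain around closure_bio_submit()) show the usual queue-then-drain idiom; a small hedged sketch of the same thing with invented names:

        #include <linux/bio.h>
        #include <linux/fs.h>           /* submit_bio() lives here in 4.1 */

        static void queue_then_drain(struct bio **bios, unsigned nr)
        {
                struct bio_list list;
                struct bio *bio;
                unsigned i;

                bio_list_init(&list);                   /* head = tail = NULL */

                for (i = 0; i < nr; i++)
                        bio_list_add(&list, bios[i]);   /* O(1) append via the cached tail */

                while ((bio = bio_list_pop(&list)))     /* pops from the head, FIFO order */
                        submit_bio(bio->bi_rw, bio);
        }
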
649 * allocate their own private memory pools for bio and iovec structures.
699 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
700 extern void bio_integrity_free(struct bio *);
701 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
702 extern bool bio_integrity_enabled(struct bio *bio);
703 extern int bio_integrity_prep(struct bio *);
704 extern void bio_integrity_endio(struct bio *, int);
705 extern void bio_integrity_advance(struct bio *, unsigned int);
706 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
707 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
714 static inline void *bio_integrity(struct bio *bio) bio_integrity() argument
719 static inline bool bio_integrity_enabled(struct bio *bio) bio_integrity_enabled() argument
734 static inline int bio_integrity_prep(struct bio *bio) bio_integrity_prep() argument
739 static inline void bio_integrity_free(struct bio *bio) bio_integrity_free() argument
744 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, bio_integrity_clone() argument
750 static inline void bio_integrity_advance(struct bio *bio, bio_integrity_advance() argument
756 static inline void bio_integrity_trim(struct bio *bio, unsigned int offset, bio_integrity_trim() argument
767 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) bio_integrity_flagged() argument
blk_types.h 11 struct bio;
17 typedef void (bio_end_io_t) (struct bio *, int);
18 typedef void (bio_destructor_t) (struct bio *);
46 struct bio { struct
47 struct bio *bi_next; /* request queue link */
63 * sizes of the first and last mergeable segments in this bio.
75 * Optional ioc and css associated with this bio. Put on bio
102 * We can inline a number of vecs at the end of the bio, to avoid
104 * MUST obviously be kept at the very end of the bio.
109 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
112 * bio flags
119 #define BIO_BOUNCED 5 /* bio is a bounce bio */
124 #define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
133 #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
136 * top 4 bits of bio flags indicate the pool this bio came from
142 #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
148 * bi_rw of struct bio. Note that some flags are only valid in either one.
169 /* bio only flags */
171 __REQ_THROTTLED, /* This bio has already been subjected to
221 /* This mask is used for both bio and request merge checking */
dm-region-hash.h 50 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
79 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
81 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
elevator.h 13 struct bio *);
19 typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
22 struct request *, struct bio *);
34 struct bio *, gfp_t);
124 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
129 struct bio *);
138 struct bio *bio, gfp_t gfp_mask);
158 extern bool elv_rq_merge_ok(struct request *, struct bio *);
device-mapper.h 11 #include <linux/bio.h>
48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
66 struct bio *bio, int error);
197 * Some targets need to be sent the same WRITE bio severals times so
199 * examines any supplied bio and returns the number of copies of it the
202 typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
219 * The bio number can be accessed with dm_bio_get_target_bio_nr.
227 * The bio number can be accessed with dm_bio_get_target_bio_nr.
233 * The bio number can be accessed with dm_bio_get_target_bio_nr.
238 * The minimum number of extra bytes allocated in each bio for the
287 * For bio-based dm.
288 * One of these is allocated for each bio.
298 struct bio clone;
301 static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) dm_per_bio_data() argument
303 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; dm_per_bio_data()
306 static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) dm_bio_from_per_bio_data()
308 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); dm_bio_from_per_bio_data()
311 static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) dm_bio_get_target_bio_nr() argument
313 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; dm_bio_get_target_bio_nr()
409 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
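
A hedged sketch of how a bio-based target typically uses the per-bio-data helpers above (the target, struct my_per_bio and its contents are invented; the contract is that the constructor sets ti->per_bio_data_size before map() can call dm_per_bio_data()):

        #include <linux/device-mapper.h>
        #include <linux/jiffies.h>

        struct my_per_bio {                     /* illustrative per-bio bookkeeping */
                unsigned long start_jiffies;
        };

        static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
        {
                ti->per_bio_data_size = sizeof(struct my_per_bio);
                return 0;
        }

        static int my_map(struct dm_target *ti, struct bio *bio)
        {
                struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));

                pb->start_jiffies = jiffies;    /* read back later, e.g. in end_io, with the same call */
                /* ... remap bio->bi_bdev / bi_iter.bi_sector to the underlying device ... */
                return DM_MAPIO_REMAPPED;
        }
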
pktcdvd.h 23 /* default bio write queue congestion marks */
117 struct bio *w_bio; /* The bio we will send to the real CD */
132 struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
145 struct bio *bio; member in struct:pkt_rb_node
150 struct bio *bio; /* Original read request bio */ member in struct:packet_stacked_data
dm-io.h 44 struct bio *bio; member in union:dm_io_memory::__anon11610
blkdev.h 18 #include <linux/bio.h>
120 struct bio *bio; member in struct:request
121 struct bio *biotail;
237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
700 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) blk_write_same_mergeable()
741 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
747 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) blk_queue_bounce() argument
763 struct bio *bio; member in struct:req_iterator
770 if ((rq->bio)) \
771 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
774 __rq_for_each_bio(_iter.bio, _rq) \
775 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
778 (_iter.bio->bi_next == NULL && \
794 extern void generic_make_request(struct bio *bio);
799 extern struct request *blk_make_request(struct request_queue *, struct bio *,
809 int (*bio_ctr)(struct bio *, struct bio *, void *),
815 extern void blk_recount_segments(struct request_queue *, struct bio *);
853 extern int blk_rq_unmap_user(struct bio *);
888 return rq->bio ? bio_cur_bytes(rq->bio) : 0; blk_rq_cur_bytes()
946 struct bio *bio; blk_rq_count_bios() local
948 __rq_for_each_bio(bio, rq) blk_rq_count_bios()
1494 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1496 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1500 struct bio *);
1532 struct bio;
1542 struct bio *b) blk_rq_count_integrity_sg()
1547 struct bio *b, blk_rq_map_integrity_sg()
1588 struct bio *b) blk_integrity_merge_bio()
swap.h 18 struct bio;
376 extern void end_swap_bio_write(struct bio *bio, int err);
378 void (*end_write_func)(struct bio *, int));
380 extern void end_swap_bio_read(struct bio *bio, int err);
buffer_head.h 57 * is the bio, and buffer_heads are used for extracting block
59 * a page (via a page_mapping) and for wrapping bio submission
/linux-4.1.27/block/
blk-map.c 6 #include <linux/bio.h>
13 struct bio *bio) blk_rq_append_bio()
15 if (!rq->bio) blk_rq_append_bio()
16 blk_rq_bio_prep(q, rq, bio); blk_rq_append_bio()
17 else if (!ll_back_merge_fn(q, rq, bio)) blk_rq_append_bio()
20 rq->biotail->bi_next = bio; blk_rq_append_bio()
21 rq->biotail = bio; blk_rq_append_bio()
23 rq->__data_len += bio->bi_iter.bi_size; blk_rq_append_bio()
28 static int __blk_rq_unmap_user(struct bio *bio) __blk_rq_unmap_user() argument
32 if (bio) { __blk_rq_unmap_user()
33 if (bio_flagged(bio, BIO_USER_MAPPED)) __blk_rq_unmap_user()
34 bio_unmap_user(bio); __blk_rq_unmap_user()
36 ret = bio_uncopy_user(bio); __blk_rq_unmap_user()
57 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
60 * original bio must be passed back in to blk_rq_unmap_user() for proper
67 struct bio *bio; blk_rq_map_user_iov() local
89 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); blk_rq_map_user_iov()
91 bio = bio_map_user_iov(q, iter, gfp_mask); blk_rq_map_user_iov()
93 if (IS_ERR(bio)) blk_rq_map_user_iov()
94 return PTR_ERR(bio); blk_rq_map_user_iov()
97 bio->bi_flags |= (1 << BIO_NULL_MAPPED); blk_rq_map_user_iov()
99 if (bio->bi_iter.bi_size != iter->count) { blk_rq_map_user_iov()
101 * Grab an extra reference to this bio, as bio_unmap_user() blk_rq_map_user_iov()
105 bio_get(bio); blk_rq_map_user_iov()
106 bio_endio(bio, 0); blk_rq_map_user_iov()
107 __blk_rq_unmap_user(bio); blk_rq_map_user_iov()
111 if (!bio_flagged(bio, BIO_USER_MAPPED)) blk_rq_map_user_iov()
114 blk_queue_bounce(q, &bio); blk_rq_map_user_iov()
115 bio_get(bio); blk_rq_map_user_iov()
116 blk_rq_bio_prep(q, rq, bio); blk_rq_map_user_iov()
138 * @bio: start of bio list
142 * supply the original rq->bio from the blk_rq_map_user() return, since
143 * the I/O completion may have changed rq->bio.
145 int blk_rq_unmap_user(struct bio *bio) blk_rq_unmap_user() argument
147 struct bio *mapped_bio; blk_rq_unmap_user()
150 while (bio) { blk_rq_unmap_user()
151 mapped_bio = bio; blk_rq_unmap_user()
152 if (unlikely(bio_flagged(bio, BIO_BOUNCED))) blk_rq_unmap_user()
153 mapped_bio = bio->bi_private; blk_rq_unmap_user()
159 mapped_bio = bio; blk_rq_unmap_user()
160 bio = bio->bi_next; blk_rq_unmap_user()
187 struct bio *bio; blk_rq_map_kern() local
197 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); blk_rq_map_kern()
199 bio = bio_map_kern(q, kbuf, len, gfp_mask); blk_rq_map_kern()
201 if (IS_ERR(bio)) blk_rq_map_kern()
202 return PTR_ERR(bio); blk_rq_map_kern()
205 bio->bi_rw |= REQ_WRITE; blk_rq_map_kern()
210 ret = blk_rq_append_bio(q, rq, bio); blk_rq_map_kern()
213 bio_put(bio); blk_rq_map_kern()
217 blk_queue_bounce(q, &rq->bio); blk_rq_map_kern()
12 blk_rq_append_bio(struct request_queue *q, struct request *rq, struct bio *bio) blk_rq_append_bio() argument
bio.c 20 #include <linux/bio.h>
35 * Test patch to inline a certain number of bi_io_vec's inside the bio
36 * itself, to shrink a bio data allocation from two mempool calls to one
52 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
73 unsigned int sz = sizeof(struct bio) + extra_size; bio_find_or_create_slab()
113 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); bio_find_or_create_slab()
141 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) bio_put_slab()
237 static void __bio_free(struct bio *bio) __bio_free() argument
239 bio_disassociate_task(bio); __bio_free()
241 if (bio_integrity(bio)) __bio_free()
242 bio_integrity_free(bio); __bio_free()
245 static void bio_free(struct bio *bio) bio_free() argument
247 struct bio_set *bs = bio->bi_pool; bio_free()
250 __bio_free(bio); bio_free()
253 if (bio_flagged(bio, BIO_OWNS_VEC)) bio_free()
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); bio_free()
257 * If we have front padding, adjust the bio pointer before freeing bio_free()
259 p = bio; bio_free()
265 kfree(bio); bio_free()
269 void bio_init(struct bio *bio) bio_init() argument
271 memset(bio, 0, sizeof(*bio)); bio_init()
272 bio->bi_flags = 1 << BIO_UPTODATE; bio_init()
273 atomic_set(&bio->bi_remaining, 1); bio_init()
274 atomic_set(&bio->bi_cnt, 1); bio_init()
279 * bio_reset - reinitialize a bio
280 * @bio: bio to reset
283 * After calling bio_reset(), @bio will be in the same state as a freshly
284 * allocated bio returned bio bio_alloc_bioset() - the only fields that are
286 * comment in struct bio.
288 void bio_reset(struct bio *bio) bio_reset() argument
290 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); bio_reset()
292 __bio_free(bio); bio_reset()
294 memset(bio, 0, BIO_RESET_BYTES); bio_reset()
295 bio->bi_flags = flags|(1 << BIO_UPTODATE); bio_reset()
296 atomic_set(&bio->bi_remaining, 1); bio_reset()
300 static void bio_chain_endio(struct bio *bio, int error) bio_chain_endio() argument
302 bio_endio(bio->bi_private, error); bio_chain_endio()
303 bio_put(bio); bio_chain_endio()
307 * bio_chain - chain bio completions
308 * @bio: the target bio
309 * @parent: the @bio's parent bio
311 * The caller won't have a bi_end_io called when @bio completes - instead,
312 * @parent's bi_end_io won't be called until both @parent and @bio have
313 * completed; the chained bio will also be freed when it completes.
315 * The caller must not set bi_private or bi_end_io in @bio.
317 void bio_chain(struct bio *bio, struct bio *parent) bio_chain() argument
319 BUG_ON(bio->bi_private || bio->bi_end_io); bio_chain()
321 bio->bi_private = parent; bio_chain()
322 bio->bi_end_io = bio_chain_endio; bio_chain()
330 struct bio *bio; bio_alloc_rescue() local
334 bio = bio_list_pop(&bs->rescue_list); bio_alloc_rescue()
337 if (!bio) bio_alloc_rescue()
340 generic_make_request(bio); bio_alloc_rescue()
347 struct bio *bio; punt_bios_to_rescuer() local
351 * were allocated from this bio_set; otherwise, if there was a bio on punt_bios_to_rescuer()
356 * Since bio lists are singly linked, pop them all instead of trying to punt_bios_to_rescuer()
363 while ((bio = bio_list_pop(current->bio_list))) punt_bios_to_rescuer()
364 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); punt_bios_to_rescuer()
376 * bio_alloc_bioset - allocate a bio for I/O
382 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
386 * able to allocate a bio. This is due to the mempool guarantees. To make this
387 * work, callers must never allocate more than 1 bio at a time from this pool.
388 * Callers that need to allocate more than 1 bio must always submit the
389 * previously allocated bio for IO before attempting to allocate a new one.
405 * for per bio allocations.
408 * Pointer to new bio on success, NULL on failure.
410 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) bio_alloc_bioset()
417 struct bio *bio; bio_alloc_bioset() local
424 p = kmalloc(sizeof(struct bio) + bio_alloc_bioset()
471 bio = p + front_pad; bio_alloc_bioset()
472 bio_init(bio); bio_alloc_bioset()
485 bio->bi_flags |= 1 << BIO_OWNS_VEC; bio_alloc_bioset()
487 bvl = bio->bi_inline_vecs; bio_alloc_bioset()
490 bio->bi_pool = bs; bio_alloc_bioset()
491 bio->bi_flags |= idx << BIO_POOL_OFFSET; bio_alloc_bioset()
492 bio->bi_max_vecs = nr_iovecs; bio_alloc_bioset()
493 bio->bi_io_vec = bvl; bio_alloc_bioset()
494 return bio; bio_alloc_bioset()
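
The front_pad argument is what lets bcache wrap every bio it allocates in a struct bbio (see the bioset_create(4, offsetof(struct bbio, bio)) hits in super.c above); a hedged sketch of that embedding trick with an invented wrapper struct:

        #include <linux/bio.h>

        struct my_wrapped_bio {                 /* illustrative; mirrors bcache's struct bbio */
                unsigned long issue_time;
                struct bio bio;                 /* must stay the last member */
        };

        static struct bio_set *my_bioset_create(void)
        {
                /* Reserve 4 bios; front_pad makes each allocation a my_wrapped_bio. */
                return bioset_create(4, offsetof(struct my_wrapped_bio, bio));
        }

        static struct my_wrapped_bio *my_bio_alloc(struct bio_set *bs, int nr_vecs)
        {
                /* Per the comment above: submit each bio before allocating the
                 * next one from the same set, or the mempool guarantee is lost. */
                struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);

                return bio ? container_of(bio, struct my_wrapped_bio, bio) : NULL;
        }
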
502 void zero_fill_bio(struct bio *bio) zero_fill_bio() argument
508 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
518 * bio_put - release a reference to a bio
519 * @bio: bio to release reference to
522 * Put a reference to a &struct bio, either one you have gotten with
523 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
525 void bio_put(struct bio *bio) bio_put() argument
527 BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); bio_put()
532 if (atomic_dec_and_test(&bio->bi_cnt)) bio_put()
533 bio_free(bio); bio_put()
537 inline int bio_phys_segments(struct request_queue *q, struct bio *bio) bio_phys_segments() argument
539 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) bio_phys_segments()
540 blk_recount_segments(q, bio); bio_phys_segments()
542 return bio->bi_phys_segments; bio_phys_segments()
547 * __bio_clone_fast - clone a bio that shares the original bio's biovec
548 * @bio: destination bio
549 * @bio_src: bio to clone
551 * Clone a &bio. Caller will own the returned bio, but not
553 * bio will be one.
555 * Caller must ensure that @bio_src is not freed before @bio.
557 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) __bio_clone_fast() argument
559 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); __bio_clone_fast()
565 bio->bi_bdev = bio_src->bi_bdev; __bio_clone_fast()
566 bio->bi_flags |= 1 << BIO_CLONED; __bio_clone_fast()
567 bio->bi_rw = bio_src->bi_rw; __bio_clone_fast()
568 bio->bi_iter = bio_src->bi_iter; __bio_clone_fast()
569 bio->bi_io_vec = bio_src->bi_io_vec; __bio_clone_fast()
574 * bio_clone_fast - clone a bio that shares the original bio's biovec
575 * @bio: bio to clone
579 * Like __bio_clone_fast, only also allocates the returned bio
581 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) bio_clone_fast() argument
583 struct bio *b; bio_clone_fast()
589 __bio_clone_fast(b, bio); bio_clone_fast()
591 if (bio_integrity(bio)) { bio_clone_fast()
594 ret = bio_integrity_clone(b, bio, gfp_mask); bio_clone_fast()
607 * bio_clone_bioset - clone a bio
608 * @bio_src: bio to clone
612 * Clone bio. Caller will own the returned bio, but not the actual data it
613 * points to. Reference count of returned bio will be one.
615 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, bio_clone_bioset()
620 struct bio *bio; bio_clone_bioset() local
624 * bio_src->bi_io_vec to bio->bi_io_vec. bio_clone_bioset()
628 * - The point of cloning the biovec is to produce a bio with a biovec bio_clone_bioset()
631 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if bio_clone_bioset()
637 * that does not own the bio - reason being drivers don't use it for bio_clone_bioset()
644 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); bio_clone_bioset()
645 if (!bio) bio_clone_bioset()
648 bio->bi_bdev = bio_src->bi_bdev; bio_clone_bioset()
649 bio->bi_rw = bio_src->bi_rw; bio_clone_bioset()
650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio_clone_bioset()
651 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; bio_clone_bioset()
653 if (bio->bi_rw & REQ_DISCARD) bio_clone_bioset()
656 if (bio->bi_rw & REQ_WRITE_SAME) { bio_clone_bioset()
657 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; bio_clone_bioset()
662 bio->bi_io_vec[bio->bi_vcnt++] = bv; bio_clone_bioset()
668 ret = bio_integrity_clone(bio, bio_src, gfp_mask); bio_clone_bioset()
670 bio_put(bio); bio_clone_bioset()
675 return bio; bio_clone_bioset()
685 * into a bio, it does not account for dynamic restrictions that vary
702 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page __bio_add_page() argument
710 * cloned bio must not modify vec list __bio_add_page()
712 if (unlikely(bio_flagged(bio, BIO_CLONED))) __bio_add_page()
715 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) __bio_add_page()
723 if (bio->bi_vcnt > 0) { __bio_add_page()
724 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; __bio_add_page()
737 .bi_bdev = bio->bi_bdev, __bio_add_page()
738 .bi_sector = bio->bi_iter.bi_sector, __bio_add_page()
739 .bi_size = bio->bi_iter.bi_size - __bio_add_page()
741 .bi_rw = bio->bi_rw, __bio_add_page()
750 bio->bi_iter.bi_size += len; __bio_add_page()
763 if (bio->bi_vcnt >= bio->bi_max_vecs) __bio_add_page()
770 bvec = &bio->bi_io_vec[bio->bi_vcnt]; __bio_add_page()
774 bio->bi_vcnt++; __bio_add_page()
775 bio->bi_phys_segments++; __bio_add_page()
776 bio->bi_iter.bi_size += len; __bio_add_page()
783 while (bio->bi_phys_segments > queue_max_segments(q)) { __bio_add_page()
789 blk_recount_segments(q, bio); __bio_add_page()
799 .bi_bdev = bio->bi_bdev, __bio_add_page()
800 .bi_sector = bio->bi_iter.bi_sector, __bio_add_page()
801 .bi_size = bio->bi_iter.bi_size - len, __bio_add_page()
802 .bi_rw = bio->bi_rw, __bio_add_page()
814 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) __bio_add_page()
815 bio->bi_flags &= ~(1 << BIO_SEG_VALID); __bio_add_page()
824 bio->bi_vcnt--; __bio_add_page()
825 bio->bi_iter.bi_size -= len; __bio_add_page()
826 blk_recount_segments(q, bio); __bio_add_page()
831 * bio_add_pc_page - attempt to add page to bio
833 * @bio: destination bio
839 * number of reasons, such as the bio being full or target block device
840 * limitations. The target block device must allow bio's up to PAGE_SIZE,
841 * so it is always possible to add a single page to an empty bio.
845 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, bio_add_pc_page() argument
848 return __bio_add_page(q, bio, page, len, offset, bio_add_pc_page()
854 * bio_add_page - attempt to add page to bio
855 * @bio: destination bio
861 * number of reasons, such as the bio being full or target block device
862 * limitations. The target block device must allow bio's up to PAGE_SIZE,
863 * so it is always possible to add a single page to an empty bio.
865 int bio_add_page(struct bio *bio, struct page *page, unsigned int len, bio_add_page() argument
868 struct request_queue *q = bdev_get_queue(bio->bi_bdev); bio_add_page()
871 max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); bio_add_page()
872 if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size) bio_add_page()
875 return __bio_add_page(q, bio, page, len, offset, max_sectors); bio_add_page()
884 static void submit_bio_wait_endio(struct bio *bio, int error) submit_bio_wait_endio() argument
886 struct submit_bio_ret *ret = bio->bi_private; submit_bio_wait_endio()
893 * submit_bio_wait - submit a bio, and wait until it completes
895 * @bio: The &struct bio which describes the I/O
900 int submit_bio_wait(int rw, struct bio *bio) submit_bio_wait() argument
906 bio->bi_private = &ret; submit_bio_wait()
907 bio->bi_end_io = submit_bio_wait_endio; submit_bio_wait()
908 submit_bio(rw, bio); submit_bio_wait()
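Putting bio_add_page() and submit_bio_wait() together, a minimal synchronous read of one page from a block device; the bdev, sector and error handling strategy are assumptions for the sketch:

    #include <linux/bio.h>

    static int read_one_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(GFP_KERNEL, 1);
            if (!bio)
                    return -ENOMEM;

            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;

            /* bio_add_page() returns the number of bytes actually added */
            if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                    bio_put(bio);
                    return -EIO;
            }

            ret = submit_bio_wait(READ, bio);       /* blocks until bi_end_io fires */
            bio_put(bio);
            return ret;
    }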
916 * bio_advance - increment/complete a bio by some number of bytes
917 * @bio: bio to advance
924 * @bio will then represent the remaining, uncompleted portion of the io.
926 void bio_advance(struct bio *bio, unsigned bytes) bio_advance() argument
928 if (bio_integrity(bio)) bio_advance()
929 bio_integrity_advance(bio, bytes); bio_advance()
931 bio_advance_iter(bio, &bio->bi_iter, bytes); bio_advance()
936 * bio_alloc_pages - allocates a single page for each bvec in a bio
937 * @bio: bio to allocate pages for
940 * Allocates pages up to @bio->bi_vcnt.
945 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) bio_alloc_pages() argument
950 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
953 while (--bv >= bio->bi_io_vec) bio_for_each_segment_all()
966 * @src: source bio list
967 * @dst: destination bio list
975 void bio_copy_data(struct bio *dst, struct bio *src) bio_copy_data()
1040 * bio_copy_from_iter - copy all pages from iov_iter to bio
1041 * @bio: The &struct bio which describes the I/O as destination
1044 * Copy all pages from iov_iter to bio.
1047 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) bio_copy_from_iter() argument
1052 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1071 * bio_copy_to_iter - copy all pages from bio to iov_iter
1072 * @bio: The &struct bio which describes the I/O as source
1075 * Copy all pages from bio to iov_iter.
1078 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) bio_copy_to_iter() argument
1083 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1101 static void bio_free_pages(struct bio *bio) bio_free_pages() argument
1106 bio_for_each_segment_all(bvec, bio, i) bio_free_pages()
1111 * bio_uncopy_user - finish previously mapped bio
1112 * @bio: bio being terminated
1117 int bio_uncopy_user(struct bio *bio) bio_uncopy_user() argument
1119 struct bio_map_data *bmd = bio->bi_private; bio_uncopy_user()
1122 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { bio_uncopy_user()
1130 else if (bio_data_dir(bio) == READ) bio_uncopy_user()
1131 ret = bio_copy_to_iter(bio, bmd->iter); bio_uncopy_user()
1133 bio_free_pages(bio); bio_uncopy_user()
1136 bio_put(bio); bio_uncopy_user()
1142 * bio_copy_user_iov - copy user data to bio
1148 * Prepares and returns a bio for indirect user io, bouncing data
1152 struct bio *bio_copy_user_iov(struct request_queue *q, bio_copy_user_iov()
1159 struct bio *bio; bio_copy_user_iov() local
1202 bio = bio_kmalloc(gfp_mask, nr_pages); bio_copy_user_iov()
1203 if (!bio) bio_copy_user_iov()
1207 bio->bi_rw |= REQ_WRITE; bio_copy_user_iov()
1241 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) bio_copy_user_iov()
1256 ret = bio_copy_from_iter(bio, *iter); bio_copy_user_iov()
1261 bio->bi_private = bmd; bio_copy_user_iov()
1262 return bio; bio_copy_user_iov()
1265 bio_free_pages(bio); bio_copy_user_iov()
1266 bio_put(bio); bio_copy_user_iov()
1273 * bio_map_user_iov - map user iovec into bio
1274 * @q: the struct request_queue for the bio
1278 * Map the user space address into a bio suitable for io to a block
1281 struct bio *bio_map_user_iov(struct request_queue *q, bio_map_user_iov()
1288 struct bio *bio; bio_map_user_iov() local
1317 bio = bio_kmalloc(gfp_mask, nr_pages); bio_map_user_iov()
1318 if (!bio) bio_map_user_iov()
1355 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < bio_map_user_iov()
1365 * release the pages we didn't map into the bio, if any bio_map_user_iov()
1377 bio->bi_rw |= REQ_WRITE; bio_map_user_iov()
1379 bio->bi_flags |= (1 << BIO_USER_MAPPED); bio_map_user_iov()
1382 * subtle -- if __bio_map_user() ended up bouncing a bio, bio_map_user_iov()
1387 bio_get(bio); bio_map_user_iov()
1388 return bio; bio_map_user_iov()
1398 bio_put(bio); bio_map_user_iov()
1402 static void __bio_unmap_user(struct bio *bio) __bio_unmap_user() argument
1410 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1411 if (bio_data_dir(bio) == READ) bio_for_each_segment_all()
1417 bio_put(bio);
1421 * bio_unmap_user - unmap a bio
1422 * @bio: the bio being unmapped
1424 * Unmap a bio previously mapped by bio_map_user(). Must be called with
1429 void bio_unmap_user(struct bio *bio) bio_unmap_user() argument
1431 __bio_unmap_user(bio); bio_unmap_user()
1432 bio_put(bio); bio_unmap_user()
1436 static void bio_map_kern_endio(struct bio *bio, int err) bio_map_kern_endio() argument
1438 bio_put(bio); bio_map_kern_endio()
1442 * bio_map_kern - map kernel address into bio
1443 * @q: the struct request_queue for the bio
1446 * @gfp_mask: allocation flags for bio allocation
1448 * Map the kernel address into a bio suitable for io to a block
1451 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, bio_map_kern()
1459 struct bio *bio; bio_map_kern() local
1461 bio = bio_kmalloc(gfp_mask, nr_pages); bio_map_kern()
1462 if (!bio) bio_map_kern()
1475 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, bio_map_kern()
1478 bio_put(bio); bio_map_kern()
1487 bio->bi_end_io = bio_map_kern_endio; bio_map_kern()
1488 return bio; bio_map_kern()
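bio_map_kern() hands back a bio whose biovec points straight at the caller's buffer (no copy), leaving the device and starting sector for the caller to fill in. A sketch of a synchronous write of a kernel buffer, assuming buf stays valid and is addressable by the device until completion:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int write_kernel_buf(struct block_device *bdev, sector_t sector,
                                void *buf, unsigned int len)
    {
            struct request_queue *q = bdev_get_queue(bdev);
            struct bio *bio;
            int ret;

            bio = bio_map_kern(q, buf, len, GFP_KERNEL);
            if (IS_ERR(bio))
                    return PTR_ERR(bio);

            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;

            ret = submit_bio_wait(WRITE, bio);      /* replaces the default end_io */
            bio_put(bio);                           /* drop our reference afterwards */
            return ret;
    }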
1492 static void bio_copy_kern_endio(struct bio *bio, int err) bio_copy_kern_endio() argument
1494 bio_free_pages(bio); bio_copy_kern_endio()
1495 bio_put(bio); bio_copy_kern_endio()
1498 static void bio_copy_kern_endio_read(struct bio *bio, int err) bio_copy_kern_endio_read() argument
1500 char *p = bio->bi_private; bio_copy_kern_endio_read()
1504 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1509 bio_copy_kern_endio(bio, err);
1513 * bio_copy_kern - copy kernel address into bio
1514 * @q: the struct request_queue for the bio
1517 * @gfp_mask: allocation flags for bio and page allocation
1520 * copy the kernel address into a bio suitable for io to a block
1523 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, bio_copy_kern()
1529 struct bio *bio; bio_copy_kern() local
1540 bio = bio_kmalloc(gfp_mask, nr_pages); bio_copy_kern()
1541 if (!bio) bio_copy_kern()
1558 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) bio_copy_kern()
1566 bio->bi_end_io = bio_copy_kern_endio_read; bio_copy_kern()
1567 bio->bi_private = data; bio_copy_kern()
1569 bio->bi_end_io = bio_copy_kern_endio; bio_copy_kern()
1570 bio->bi_rw |= REQ_WRITE; bio_copy_kern()
1573 return bio; bio_copy_kern()
1576 bio_free_pages(bio); bio_copy_kern()
1577 bio_put(bio); bio_copy_kern()
1605 * deferred bio dirtying paths.
1609 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1611 void bio_set_pages_dirty(struct bio *bio) bio_set_pages_dirty() argument
1616 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1624 static void bio_release_pages(struct bio *bio) bio_release_pages() argument
1629 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1652 static struct bio *bio_dirty_list;
1660 struct bio *bio; bio_dirty_fn() local
1663 bio = bio_dirty_list; bio_dirty_fn()
1667 while (bio) { bio_dirty_fn()
1668 struct bio *next = bio->bi_private; bio_dirty_fn()
1670 bio_set_pages_dirty(bio); bio_dirty_fn()
1671 bio_release_pages(bio); bio_dirty_fn()
1672 bio_put(bio); bio_dirty_fn()
1673 bio = next; bio_dirty_fn()
1677 void bio_check_pages_dirty(struct bio *bio) bio_check_pages_dirty() argument
1683 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1698 bio->bi_private = bio_dirty_list;
1699 bio_dirty_list = bio;
1703 bio_put(bio);
1736 void bio_flush_dcache_pages(struct bio *bi) bio_flush_dcache_pages()
1748 * bio_endio - end I/O on a bio
1749 * @bio: bio
1753 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1754 * preferred way to end I/O on a bio; it takes care of clearing bio_endio()
1758 * bio unless they own it and thus know that it has an end_io
1761 void bio_endio(struct bio *bio, int error) bio_endio() argument
1763 while (bio) { bio_endio()
1764 BUG_ON(atomic_read(&bio->bi_remaining) <= 0); bio_endio()
1767 clear_bit(BIO_UPTODATE, &bio->bi_flags); bio_endio()
1768 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) bio_endio()
1771 if (!atomic_dec_and_test(&bio->bi_remaining)) bio_endio()
1782 if (bio->bi_end_io == bio_chain_endio) { bio_endio()
1783 struct bio *parent = bio->bi_private; bio_endio()
1784 bio_put(bio); bio_endio()
1785 bio = parent; bio_endio()
1787 if (bio->bi_end_io) bio_endio()
1788 bio->bi_end_io(bio, error); bio_endio()
1789 bio = NULL; bio_endio()
1796 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
1797 * @bio: bio
1801 * function, probably you should've cloned the entire bio.
1803 void bio_endio_nodec(struct bio *bio, int error) bio_endio_nodec() argument
1805 atomic_inc(&bio->bi_remaining); bio_endio_nodec()
1806 bio_endio(bio, error); bio_endio_nodec()
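On the driver side, a make_request-based device that owns a bio for the duration of the transfer signals completion with bio_endio(). A sketch with an assumed struct myblk and myblk_transfer() helper (neither appears in this source):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    struct myblk {
            sector_t        nr_sectors;     /* assumed device size in 512-byte sectors */
    };

    static void myblk_transfer(struct myblk *dev, struct bio *bio);  /* assumed */

    static void myblk_make_request(struct request_queue *q, struct bio *bio)
    {
            struct myblk *dev = q->queuedata;

            if (bio_end_sector(bio) > dev->nr_sectors) {
                    bio_endio(bio, -EIO);           /* fail: past end of device */
                    return;
            }

            myblk_transfer(dev, bio);               /* assumed synchronous transfer */
            bio_endio(bio, 0);                      /* completes the whole bio */
    }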
1811 * bio_split - split a bio
1812 * @bio: bio to split
1813 * @sectors: number of sectors to split from the front of @bio
1815 * @bs: bio set to allocate from
1817 * Allocates and returns a new bio which represents @sectors from the start of
1818 * @bio, and updates @bio to represent the remaining sectors.
1820 * Unless this is a discard request the newly allocated bio will point
1821 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1822 * @bio is not freed before the split.
1824 struct bio *bio_split(struct bio *bio, int sectors, bio_split() argument
1827 struct bio *split = NULL; bio_split()
1830 BUG_ON(sectors >= bio_sectors(bio)); bio_split()
1836 if (bio->bi_rw & REQ_DISCARD) bio_split()
1837 split = bio_clone_bioset(bio, gfp, bs); bio_split()
1839 split = bio_clone_fast(bio, gfp, bs); bio_split()
1849 bio_advance(bio, split->bi_iter.bi_size); bio_split()
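The usual consumer pattern for bio_split() in a make_request-style path is to peel off the front piece, chain it to the original so the parent only completes once both halves do, and resubmit the remainder. A sketch under those assumptions; my_bio_set, my_handle_bio() and the 1 MiB boundary are illustrative:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_split_and_submit(struct bio *bio)
    {
            unsigned int boundary_sectors = 1 << (20 - 9);  /* 1 MiB in sectors */

            if (bio_sectors(bio) > boundary_sectors) {
                    struct bio *split = bio_split(bio, boundary_sectors,
                                                  GFP_NOIO, my_bio_set);

                    bio_chain(split, bio);          /* parent waits for the split */
                    generic_make_request(bio);      /* requeue the remainder */
                    bio = split;                    /* carry on with the front piece */
            }

            my_handle_bio(bio);                     /* assumed per-chunk handler */
    }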
1856 * bio_trim - trim a bio
1857 * @bio: bio to trim
1858 * @offset: number of sectors to trim from the front of @bio
1859 * @size: size we want to trim @bio to, in sectors
1861 void bio_trim(struct bio *bio, int offset, int size) bio_trim() argument
1863 /* 'bio' is a cloned bio which we need to trim to match bio_trim()
1868 if (offset == 0 && size == bio->bi_iter.bi_size) bio_trim()
1871 clear_bit(BIO_SEG_VALID, &bio->bi_flags); bio_trim()
1873 bio_advance(bio, offset << 9); bio_trim()
1875 bio->bi_iter.bi_size = size; bio_trim()
1953 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1954 * @front_pad: Number of bytes to allocate in front of the returned bio
1958 * to ask for a number of bytes to be allocated in front of the bio.
1959 * Front pad allocation is useful for embedding the bio inside
1960 * another structure, to avoid allocating extra data to go with the bio.
1961 * Note that the bio must be embedded at the END of that structure always,
1972 * @pool_size: Number of bio to cache in the mempool
1973 * @front_pad: Number of bytes to allocate in front of the returned bio
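The front_pad mechanism described above is what lets a driver embed the bio at the end of its own per-I/O structure instead of allocating two objects. A minimal sketch, assuming a hypothetical struct my_io and bio set (neither is part of this source):

    #include <linux/bio.h>

    struct my_io {
            void            *ctx;           /* driver-private per-I/O state */
            struct bio      bio;            /* must remain the last member */
    };

    static struct bio_set *my_bs;

    static int my_pool_init(void)
    {
            /* pool of 4 bios, each preceded by front padding for struct my_io */
            my_bs = bioset_create(4, offsetof(struct my_io, bio));
            return my_bs ? 0 : -ENOMEM;
    }

    static struct my_io *my_io_alloc(unsigned int nr_vecs)
    {
            struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bs);

            return bio ? container_of(bio, struct my_io, bio) : NULL;
    }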
1987 * bio_associate_current - associate a bio with %current
1988 * @bio: target bio
1990 * Associate @bio with %current if it hasn't been associated yet. Block
1991 * layer will treat @bio as if it were issued by %current no matter which
1995 * which will be put when @bio is released. The caller must own @bio,
1999 int bio_associate_current(struct bio *bio) bio_associate_current() argument
2004 if (bio->bi_ioc) bio_associate_current()
2013 bio->bi_ioc = ioc; bio_associate_current()
2019 bio->bi_css = css; bio_associate_current()
2027 * @bio: target bio
2029 void bio_disassociate_task(struct bio *bio) bio_disassociate_task() argument
2031 if (bio->bi_ioc) { bio_disassociate_task()
2032 put_io_context(bio->bi_ioc); bio_disassociate_task()
2033 bio->bi_ioc = NULL; bio_disassociate_task()
2035 if (bio->bi_css) { bio_disassociate_task()
2036 css_put(bio->bi_css); bio_disassociate_task()
2037 bio->bi_css = NULL; bio_disassociate_task()
2068 panic("bio: can't allocate bios\n"); init_bio()
2075 panic("bio: can't allocate bios\n"); init_bio()
2078 panic("bio: can't create integrity pool\n"); init_bio()
H A Dbio-integrity.c2 * bio-integrity.c - bio data integrity extensions
26 #include <linux/bio.h>
36 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
37 * @bio: bio to attach integrity metadata to
41 * Description: This function prepares a bio for attaching integrity
45 struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, bio_integrity_alloc() argument
50 struct bio_set *bs = bio->bi_pool; bio_integrity_alloc()
80 bip->bip_bio = bio; bio_integrity_alloc()
81 bio->bi_integrity = bip; bio_integrity_alloc()
82 bio->bi_rw |= REQ_INTEGRITY; bio_integrity_alloc()
92 * bio_integrity_free - Free bio integrity payload
93 * @bio: bio containing bip to be freed
95 * Description: Used to free the integrity portion of a bio. Usually
98 void bio_integrity_free(struct bio *bio) bio_integrity_free() argument
100 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_free()
101 struct bio_set *bs = bio->bi_pool; bio_integrity_free()
117 bio->bi_integrity = NULL; bio_integrity_free()
123 * @bio: bio to update
128 * Description: Attach a page containing integrity metadata to bio.
130 int bio_integrity_add_page(struct bio *bio, struct page *page, bio_integrity_add_page() argument
133 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_add_page()
154 * @bio: bio to check
157 * on this bio or not. bio data direction and target device must be
161 bool bio_integrity_enabled(struct bio *bio) bio_integrity_enabled() argument
163 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_enabled()
165 if (!bio_is_rw(bio)) bio_integrity_enabled()
169 if (bio_integrity(bio)) bio_integrity_enabled()
175 if (bio_data_dir(bio) == READ && bi->verify_fn != NULL && bio_integrity_enabled()
179 if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL && bio_integrity_enabled()
188 * bio_integrity_intervals - Return number of integrity intervals for a bio
190 * @sectors: Size of the bio in 512-byte sectors
210 * bio_integrity_process - Process integrity metadata for a bio
211 * @bio: bio to generate/verify integrity metadata for
214 static int bio_integrity_process(struct bio *bio, bio_integrity_process() argument
217 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_process()
221 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_process()
226 iter.disk_name = bio->bi_bdev->bd_disk->disk_name; bio_integrity_process()
231 bio_for_each_segment(bv, bio, bviter) { bio_for_each_segment()
249 * bio_integrity_prep - Prepare bio for integrity I/O
250 * @bio: bio to prepare
253 * pages and attaches them to a bio. The bio must have data
259 int bio_integrity_prep(struct bio *bio) bio_integrity_prep() argument
270 bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_prep()
271 q = bdev_get_queue(bio->bi_bdev); bio_integrity_prep()
273 BUG_ON(bio_integrity(bio)); bio_integrity_prep()
275 intervals = bio_integrity_intervals(bi, bio_sectors(bio)); bio_integrity_prep()
289 /* Allocate bio integrity payload and integrity vectors */ bio_integrity_prep()
290 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); bio_integrity_prep()
299 bip_set_seed(bip, bio->bi_iter.bi_sector); bio_integrity_prep()
316 ret = bio_integrity_add_page(bio, virt_to_page(buf), bio_integrity_prep()
331 if (bio_data_dir(bio) == READ) { bio_integrity_prep()
332 bip->bip_end_io = bio->bi_end_io; bio_integrity_prep()
333 bio->bi_end_io = bio_integrity_endio; bio_integrity_prep()
337 if (bio_data_dir(bio) == WRITE) bio_integrity_prep()
338 bio_integrity_process(bio, bi->generate_fn); bio_integrity_prep()
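The expected calling sequence for these two helpers mirrors what blk_queue_bio() does further down in this listing: skip preparation when integrity is not enabled for the device, and fail the bio if the payload cannot be attached. Roughly:

    /* in a make_request-style submission path */
    if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
            bio_endio(bio, -EIO);   /* could not attach the integrity payload */
            return;
    }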
346 * @work: Work struct stored in bio to be verified
350 * and then calls the original bio end_io function.
356 struct bio *bio = bip->bip_bio; bio_integrity_verify_fn() local
357 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_verify_fn()
360 error = bio_integrity_process(bio, bi->verify_fn); bio_integrity_verify_fn()
362 /* Restore original bio completion handler */ bio_integrity_verify_fn()
363 bio->bi_end_io = bip->bip_end_io; bio_integrity_verify_fn()
364 bio_endio_nodec(bio, error); bio_integrity_verify_fn()
369 * @bio: Protected bio
379 void bio_integrity_endio(struct bio *bio, int error) bio_integrity_endio() argument
381 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_endio()
383 BUG_ON(bip->bip_bio != bio); bio_integrity_endio()
386 * integrity metadata. Restore original bio end_io handler bio_integrity_endio()
390 bio->bi_end_io = bip->bip_end_io; bio_integrity_endio()
391 bio_endio_nodec(bio, error); bio_integrity_endio()
403 * @bio: bio whose integrity vector to update
410 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) bio_integrity_advance() argument
412 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_advance()
413 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_advance()
422 * @bio: bio whose integrity vector to update
426 * Description: Used to trim the integrity vector in a cloned bio.
431 void bio_integrity_trim(struct bio *bio, unsigned int offset, bio_integrity_trim() argument
434 struct bio_integrity_payload *bip = bio_integrity(bio); bio_integrity_trim()
435 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); bio_integrity_trim()
437 bio_integrity_advance(bio, offset << 9); bio_integrity_trim()
444 * @bio: New bio
445 * @bio_src: Original bio
448 * Description: Called to allocate a bip when cloning a bio
450 int bio_integrity_clone(struct bio *bio, struct bio *bio_src, bio_integrity_clone() argument
458 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); bio_integrity_clone()
H A Dblk-merge.c6 #include <linux/bio.h>
13 struct bio *bio, __blk_recalc_rq_segments()
19 struct bio *fbio, *bbio; __blk_recalc_rq_segments()
22 if (!bio) __blk_recalc_rq_segments()
29 if (bio->bi_rw & REQ_DISCARD) __blk_recalc_rq_segments()
32 if (bio->bi_rw & REQ_WRITE_SAME) __blk_recalc_rq_segments()
35 fbio = bio; __blk_recalc_rq_segments()
40 for_each_bio(bio) { bio_for_each_segment()
41 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
43 * If SG merging is disabled, each bio vector is bio_for_each_segment()
78 bbio = bio;
94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, blk_recalc_rq_segments()
98 void blk_recount_segments(struct request_queue *q, struct bio *bio) blk_recount_segments() argument
102 /* estimate segment number by bi_vcnt for non-cloned bio */ blk_recount_segments()
103 if (bio_flagged(bio, BIO_CLONED)) blk_recount_segments()
104 seg_cnt = bio_segments(bio); blk_recount_segments()
106 seg_cnt = bio->bi_vcnt; blk_recount_segments()
110 bio->bi_phys_segments = seg_cnt; blk_recount_segments()
112 struct bio *nxt = bio->bi_next; blk_recount_segments()
114 bio->bi_next = NULL; blk_recount_segments()
115 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); blk_recount_segments()
116 bio->bi_next = nxt; blk_recount_segments()
119 bio->bi_flags |= (1 << BIO_SEG_VALID); blk_recount_segments()
123 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, blk_phys_contig_segment() argument
124 struct bio *nxt) blk_phys_contig_segment()
132 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > blk_phys_contig_segment()
136 if (!bio_has_data(bio)) blk_phys_contig_segment()
139 bio_for_each_segment(end_bv, bio, iter) blk_phys_contig_segment()
149 * bio and nxt are contiguous in memory; check if the queue allows blk_phys_contig_segment()
201 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, __blk_bios_map_sg() argument
212 if (bio->bi_rw & REQ_DISCARD) { __blk_bios_map_sg()
216 * blk_add_request_payload(), a discard bio may or may not have __blk_bios_map_sg()
221 if (bio->bi_vcnt) __blk_bios_map_sg()
227 if (bio->bi_rw & REQ_WRITE_SAME) { __blk_bios_map_sg()
230 bvec = bio_iovec(bio); __blk_bios_map_sg()
235 for_each_bio(bio) __blk_bios_map_sg()
236 bio_for_each_segment(bvec, bio, iter) __blk_bios_map_sg()
253 if (rq->bio) blk_rq_map_sg()
254 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); blk_rq_map_sg()
288 struct bio *bio) ll_new_hw_segment()
290 int nr_phys_segs = bio_phys_segments(q, bio); ll_new_hw_segment()
295 if (blk_integrity_merge_bio(q, req, bio) == false) ll_new_hw_segment()
313 struct bio *bio) ll_back_merge_fn()
315 if (blk_rq_sectors(req) + bio_sectors(bio) > ll_back_merge_fn()
324 if (!bio_flagged(bio, BIO_SEG_VALID)) ll_back_merge_fn()
325 blk_recount_segments(q, bio); ll_back_merge_fn()
327 return ll_new_hw_segment(q, req, bio); ll_back_merge_fn()
331 struct bio *bio) ll_front_merge_fn()
333 if (blk_rq_sectors(req) + bio_sectors(bio) > ll_front_merge_fn()
340 if (!bio_flagged(bio, BIO_SEG_VALID)) ll_front_merge_fn()
341 blk_recount_segments(q, bio); ll_front_merge_fn()
342 if (!bio_flagged(req->bio, BIO_SEG_VALID)) ll_front_merge_fn()
343 blk_recount_segments(q, req->bio); ll_front_merge_fn()
345 return ll_new_hw_segment(q, req, bio); ll_front_merge_fn()
361 struct bio *prev = req->biotail; req_gap_to_prev()
364 next->bio->bi_io_vec[0].bv_offset); req_gap_to_prev()
372 req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; ll_merge_requests_fn()
393 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { ll_merge_requests_fn()
395 req->bio->bi_seg_front_size = seg_size; ll_merge_requests_fn()
418 * which can be mixed are set in each bio and mark @rq as mixed
424 struct bio *bio; blk_rq_set_mixed_merge() local
432 * Distributes the attributes to each bio. blk_rq_set_mixed_merge()
434 for (bio = rq->bio; bio; bio = bio->bi_next) { blk_rq_set_mixed_merge()
435 WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) && blk_rq_set_mixed_merge()
436 (bio->bi_rw & REQ_FAILFAST_MASK) != ff); blk_rq_set_mixed_merge()
437 bio->bi_rw |= ff; blk_rq_set_mixed_merge()
483 !blk_write_same_mergeable(req->bio, next->bio)) attempt_merge()
487 * If we are allowed to merge, then append bio list attempt_merge()
517 req->biotail->bi_next = next->bio; attempt_merge()
533 /* ownership of bio passed from next to req */ attempt_merge()
534 next->bio = NULL; attempt_merge()
565 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) blk_rq_merge_ok() argument
569 if (!rq_mergeable(rq) || !bio_mergeable(bio)) blk_rq_merge_ok()
572 if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) blk_rq_merge_ok()
576 if (bio_data_dir(bio) != rq_data_dir(rq)) blk_rq_merge_ok()
580 if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) blk_rq_merge_ok()
583 /* only merge integrity protected bio into ditto rq */ blk_rq_merge_ok()
584 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) blk_rq_merge_ok()
589 !blk_write_same_mergeable(rq->bio, bio)) blk_rq_merge_ok()
596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) blk_rq_merge_ok()
603 int blk_try_merge(struct request *rq, struct bio *bio) blk_try_merge() argument
605 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) blk_try_merge()
607 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) blk_try_merge()
12 __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio, bool no_sg_merge) __blk_recalc_rq_segments() argument
286 ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) ll_new_hw_segment() argument
312 ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) ll_back_merge_fn() argument
330 ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) ll_front_merge_fn() argument
H A Dbounce.c12 #include <linux/bio.h>
101 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) copy_to_high_bio_irq()
125 static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) bounce_end_io() argument
127 struct bio *bio_orig = bio->bi_private; bounce_end_io()
131 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) bounce_end_io()
137 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
147 bio_put(bio);
150 static void bounce_end_io_write(struct bio *bio, int err) bounce_end_io_write() argument
152 bounce_end_io(bio, page_pool, err); bounce_end_io_write()
155 static void bounce_end_io_write_isa(struct bio *bio, int err) bounce_end_io_write_isa() argument
158 bounce_end_io(bio, isa_page_pool, err); bounce_end_io_write_isa()
161 static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) __bounce_end_io_read() argument
163 struct bio *bio_orig = bio->bi_private; __bounce_end_io_read()
165 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) __bounce_end_io_read()
166 copy_to_high_bio_irq(bio_orig, bio); __bounce_end_io_read()
168 bounce_end_io(bio, pool, err); __bounce_end_io_read()
171 static void bounce_end_io_read(struct bio *bio, int err) bounce_end_io_read() argument
173 __bounce_end_io_read(bio, page_pool, err); bounce_end_io_read()
176 static void bounce_end_io_read_isa(struct bio *bio, int err) bounce_end_io_read_isa() argument
178 __bounce_end_io_read(bio, isa_page_pool, err); bounce_end_io_read_isa()
182 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) must_snapshot_stable_pages() argument
184 if (bio_data_dir(bio) != WRITE) must_snapshot_stable_pages()
190 return test_bit(BIO_SNAP_STABLE, &bio->bi_flags); must_snapshot_stable_pages()
193 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) must_snapshot_stable_pages() argument
199 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, __blk_queue_bounce()
202 struct bio *bio; __blk_queue_bounce() local
216 bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set); __blk_queue_bounce()
218 bio_for_each_segment_all(to, bio, i) { bio_for_each_segment_all()
241 bio->bi_flags |= (1 << BIO_BOUNCED);
244 bio->bi_end_io = bounce_end_io_write;
246 bio->bi_end_io = bounce_end_io_read;
248 bio->bi_end_io = bounce_end_io_write_isa;
250 bio->bi_end_io = bounce_end_io_read_isa;
253 bio->bi_private = *bio_orig;
254 *bio_orig = bio;
257 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) blk_queue_bounce()
263 * Data-less bio, nothing to bounce blk_queue_bounce()
273 * don't waste time iterating over bio segments blk_queue_bounce()
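For drivers whose devices cannot reach pages above the queue's bounce limit, the entry point above is typically called once at the top of the driver's make_request function; blk_queue_bounce() may replace the bio pointer with a bounced clone, which is why it takes a struct bio **. A sketch with assumed mydev_* names:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void mydev_make_request(struct request_queue *q, struct bio *bio)
    {
            /* may substitute a clone backed by lowmem bounce pages */
            blk_queue_bounce(q, &bio);

            mydev_queue_bio(q->queuedata, bio);     /* assumed driver-side queuing */
    }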
H A Dblk-lib.c6 #include <linux/bio.h>
18 static void bio_batch_end_io(struct bio *bio, int err) bio_batch_end_io() argument
20 struct bio_batch *bb = bio->bi_private; bio_batch_end_io()
26 bio_put(bio); bio_batch_end_io()
49 struct bio *bio; blkdev_issue_discard() local
89 bio = bio_alloc(gfp_mask, 1); blkdev_issue_discard()
90 if (!bio) { blkdev_issue_discard()
111 bio->bi_iter.bi_sector = sector; blkdev_issue_discard()
112 bio->bi_end_io = bio_batch_end_io; blkdev_issue_discard()
113 bio->bi_bdev = bdev; blkdev_issue_discard()
114 bio->bi_private = &bb; blkdev_issue_discard()
116 bio->bi_iter.bi_size = req_sects << 9; blkdev_issue_discard()
121 submit_bio(type, bio); blkdev_issue_discard()
163 struct bio *bio; blkdev_issue_write_same() local
179 bio = bio_alloc(gfp_mask, 1); blkdev_issue_write_same()
180 if (!bio) { blkdev_issue_write_same()
185 bio->bi_iter.bi_sector = sector; blkdev_issue_write_same()
186 bio->bi_end_io = bio_batch_end_io; blkdev_issue_write_same()
187 bio->bi_bdev = bdev; blkdev_issue_write_same()
188 bio->bi_private = &bb; blkdev_issue_write_same()
189 bio->bi_vcnt = 1; blkdev_issue_write_same()
190 bio->bi_io_vec->bv_page = page; blkdev_issue_write_same()
191 bio->bi_io_vec->bv_offset = 0; blkdev_issue_write_same()
192 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); blkdev_issue_write_same()
195 bio->bi_iter.bi_size = max_write_same_sectors << 9; blkdev_issue_write_same()
199 bio->bi_iter.bi_size = nr_sects << 9; blkdev_issue_write_same()
204 submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio); blkdev_issue_write_same()
233 struct bio *bio; __blkdev_issue_zeroout() local
244 bio = bio_alloc(gfp_mask, __blkdev_issue_zeroout()
246 if (!bio) { __blkdev_issue_zeroout()
251 bio->bi_iter.bi_sector = sector; __blkdev_issue_zeroout()
252 bio->bi_bdev = bdev; __blkdev_issue_zeroout()
253 bio->bi_end_io = bio_batch_end_io; __blkdev_issue_zeroout()
254 bio->bi_private = &bb; __blkdev_issue_zeroout()
258 ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); __blkdev_issue_zeroout()
266 submit_bio(WRITE, bio); __blkdev_issue_zeroout()
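Callers normally reach the helpers in this file through their exported wrappers rather than by building the bios themselves. For example, discarding a range could look like the following sketch (bdev, start and nr_sects are assumed to come from the caller):

    #include <linux/blkdev.h>

    static int mydev_trim_range(struct block_device *bdev, sector_t start,
                                sector_t nr_sects)
    {
            /* flags may include BLKDEV_DISCARD_SECURE when a secure erase is wanted */
            return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
    }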
H A Dblk-core.c8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
17 #include <linux/bio.h>
117 static void req_bio_endio(struct request *rq, struct bio *bio, req_bio_endio() argument
121 clear_bit(BIO_UPTODATE, &bio->bi_flags); req_bio_endio()
122 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) req_bio_endio()
126 set_bit(BIO_QUIET, &bio->bi_flags); req_bio_endio()
128 bio_advance(bio, nbytes); req_bio_endio()
130 /* don't actually finish bio if it's part of flush sequence */ req_bio_endio()
131 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) req_bio_endio()
132 bio_endio(bio, error); req_bio_endio()
146 printk(KERN_INFO " bio %p, biotail %p, len %u\n", blk_dump_rq_flags()
147 rq->bio, rq->biotail, blk_rq_bytes(rq)); blk_dump_rq_flags()
737 static void blk_queue_bio(struct request_queue *q, struct bio *bio);
927 * request associated with @bio.
929 static bool blk_rq_should_init_elevator(struct bio *bio) blk_rq_should_init_elevator() argument
931 if (!bio) blk_rq_should_init_elevator()
938 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) blk_rq_should_init_elevator()
946 * @bio: request being allocated is for this bio (can be %NULL)
948 * Determine io_context to use for request allocation for @bio. May return
951 static struct io_context *rq_ioc(struct bio *bio) rq_ioc() argument
954 if (bio && bio->bi_ioc) rq_ioc()
955 return bio->bi_ioc; rq_ioc()
964 * @bio: bio to allocate request for (can be %NULL)
975 struct bio *bio, gfp_t gfp_mask) __get_request()
980 struct io_context *ioc = rq_ioc(bio); __get_request()
1045 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { __get_request()
1075 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) __get_request()
1092 trace_block_getrq(q, bio, rw_flags & 1); __get_request()
1141 * @bio: bio to allocate request for (can be %NULL)
1152 struct bio *bio, gfp_t gfp_mask) get_request()
1159 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ get_request()
1161 rq = __get_request(rl, rw_flags, bio, gfp_mask); get_request()
1174 trace_block_sleeprq(q, bio, rw_flags & 1); get_request()
1221 * blk_make_request - given a bio, allocate a corresponding struct request.
1223 * @bio: The bio describing the memory mappings that will be submitted for IO.
1224 * It may be a chained-bio properly constructed by block/bio layer.
1229 * the caller. It is passed a &struct bio, which describes the memory info of
1234 * the needed direction of the request. (And all bio's in the passed bio-chain
1237 * If called under non-sleepable conditions, mapped bio buffers must not blk_make_request()
1242 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1244 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1245 * completion of a bio that hasn't been submitted yet, thus resulting in a
1251 struct request *blk_make_request(struct request_queue *q, struct bio *bio, blk_make_request() argument
1254 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); blk_make_request()
1261 for_each_bio(bio) { for_each_bio()
1262 struct bio *bounce_bio = bio; for_each_bio()
1287 rq->bio = rq->biotail = NULL; blk_rq_set_block_pc()
1394 /* this is a bio leak */ __blk_put_request()
1395 WARN_ON(req->bio != NULL); __blk_put_request()
1447 struct bio *bio = rq->bio; blk_add_request_payload() local
1449 bio->bi_io_vec->bv_page = page; blk_add_request_payload()
1450 bio->bi_io_vec->bv_offset = 0; blk_add_request_payload()
1451 bio->bi_io_vec->bv_len = len; blk_add_request_payload()
1453 bio->bi_iter.bi_size = len; blk_add_request_payload()
1454 bio->bi_vcnt = 1; blk_add_request_payload()
1455 bio->bi_phys_segments = 1; blk_add_request_payload()
1463 struct bio *bio) bio_attempt_back_merge()
1465 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; bio_attempt_back_merge()
1467 if (!ll_back_merge_fn(q, req, bio)) bio_attempt_back_merge()
1470 trace_block_bio_backmerge(q, req, bio); bio_attempt_back_merge()
1475 req->biotail->bi_next = bio; bio_attempt_back_merge()
1476 req->biotail = bio; bio_attempt_back_merge()
1477 req->__data_len += bio->bi_iter.bi_size; bio_attempt_back_merge()
1478 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); bio_attempt_back_merge()
1485 struct bio *bio) bio_attempt_front_merge()
1487 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; bio_attempt_front_merge()
1489 if (!ll_front_merge_fn(q, req, bio)) bio_attempt_front_merge()
1492 trace_block_bio_frontmerge(q, req, bio); bio_attempt_front_merge()
1497 bio->bi_next = req->bio; bio_attempt_front_merge()
1498 req->bio = bio; bio_attempt_front_merge()
1500 req->__sector = bio->bi_iter.bi_sector; bio_attempt_front_merge()
1501 req->__data_len += bio->bi_iter.bi_size; bio_attempt_front_merge()
1502 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); bio_attempt_front_merge()
1510 * @q: request_queue new bio is being queued at
1511 * @bio: new bio being queued
1514 * Determine whether @bio being queued on @q can be merged with a request
1527 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, blk_attempt_plug_merge() argument
1551 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) list_for_each_entry_reverse()
1554 el_ret = blk_try_merge(rq, bio); list_for_each_entry_reverse()
1556 ret = bio_attempt_back_merge(q, rq, bio); list_for_each_entry_reverse()
1560 ret = bio_attempt_front_merge(q, rq, bio); list_for_each_entry_reverse()
1569 void init_request_from_bio(struct request *req, struct bio *bio) init_request_from_bio() argument
1573 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; init_request_from_bio()
1574 if (bio->bi_rw & REQ_RAHEAD) init_request_from_bio()
1578 req->__sector = bio->bi_iter.bi_sector; init_request_from_bio()
1579 req->ioprio = bio_prio(bio); init_request_from_bio()
1580 blk_rq_bio_prep(req->q, req, bio); init_request_from_bio()
1583 static void blk_queue_bio(struct request_queue *q, struct bio *bio) blk_queue_bio() argument
1585 const bool sync = !!(bio->bi_rw & REQ_SYNC); blk_queue_bio()
1596 blk_queue_bounce(q, &bio); blk_queue_bio()
1598 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { blk_queue_bio()
1599 bio_endio(bio, -EIO); blk_queue_bio()
1603 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { blk_queue_bio()
1614 blk_attempt_plug_merge(q, bio, &request_count)) blk_queue_bio()
1619 el_ret = elv_merge(q, &req, bio); blk_queue_bio()
1621 if (bio_attempt_back_merge(q, req, bio)) { blk_queue_bio()
1622 elv_bio_merged(q, req, bio); blk_queue_bio()
1628 if (bio_attempt_front_merge(q, req, bio)) { blk_queue_bio()
1629 elv_bio_merged(q, req, bio); blk_queue_bio()
1642 rw_flags = bio_data_dir(bio); blk_queue_bio()
1650 req = get_request(q, rw_flags, bio, GFP_NOIO); blk_queue_bio()
1652 bio_endio(bio, PTR_ERR(req)); /* @q is dead */ blk_queue_bio()
1662 init_request_from_bio(req, bio); blk_queue_bio()
1693 * If bio->bi_dev is a partition, remap the location
1695 static inline void blk_partition_remap(struct bio *bio) blk_partition_remap() argument
1697 struct block_device *bdev = bio->bi_bdev; blk_partition_remap()
1699 if (bio_sectors(bio) && bdev != bdev->bd_contains) { blk_partition_remap()
1702 bio->bi_iter.bi_sector += p->start_sect; blk_partition_remap()
1703 bio->bi_bdev = bdev->bd_contains; blk_partition_remap()
1705 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, blk_partition_remap()
1707 bio->bi_iter.bi_sector - p->start_sect); blk_partition_remap()
1711 static void handle_bad_sector(struct bio *bio) handle_bad_sector() argument
1717 bdevname(bio->bi_bdev, b), handle_bad_sector()
1718 bio->bi_rw, handle_bad_sector()
1719 (unsigned long long)bio_end_sector(bio), handle_bad_sector()
1720 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); handle_bad_sector()
1722 set_bit(BIO_EOF, &bio->bi_flags); handle_bad_sector()
1761 * Check whether this bio extends beyond the end of the device.
1763 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) bio_check_eod() argument
1771 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; bio_check_eod()
1773 sector_t sector = bio->bi_iter.bi_sector; bio_check_eod()
1781 handle_bad_sector(bio); bio_check_eod()
1790 generic_make_request_checks(struct bio *bio) generic_make_request_checks() argument
1793 int nr_sectors = bio_sectors(bio); generic_make_request_checks()
1800 if (bio_check_eod(bio, nr_sectors)) generic_make_request_checks()
1803 q = bdev_get_queue(bio->bi_bdev); generic_make_request_checks()
1808 bdevname(bio->bi_bdev, b), generic_make_request_checks()
1809 (long long) bio->bi_iter.bi_sector); generic_make_request_checks()
1813 if (likely(bio_is_rw(bio) && generic_make_request_checks()
1815 printk(KERN_ERR "bio too big device %s (%u > %u)\n", generic_make_request_checks()
1816 bdevname(bio->bi_bdev, b), generic_make_request_checks()
1817 bio_sectors(bio), generic_make_request_checks()
1822 part = bio->bi_bdev->bd_part; generic_make_request_checks()
1823 if (should_fail_request(part, bio->bi_iter.bi_size) || generic_make_request_checks()
1825 bio->bi_iter.bi_size)) generic_make_request_checks()
1832 blk_partition_remap(bio); generic_make_request_checks()
1834 if (bio_check_eod(bio, nr_sectors)) generic_make_request_checks()
1838 * Filter flush bio's early so that make_request based generic_make_request_checks()
1842 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { generic_make_request_checks()
1843 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); generic_make_request_checks()
1850 if ((bio->bi_rw & REQ_DISCARD) && generic_make_request_checks()
1852 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { generic_make_request_checks()
1857 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { generic_make_request_checks()
1870 if (blk_throtl_bio(q, bio)) generic_make_request_checks()
1873 trace_block_bio_queue(q, bio); generic_make_request_checks()
1877 bio_endio(bio, err); generic_make_request_checks()
1883 * @bio: The bio describing the location in memory and on the device.
1886 * devices. It is passed a &struct bio, which describes the I/O that needs
1891 * completion, is delivered asynchronously through the bio->bi_end_io
1901 * bio happens to be merged with someone else, and may resubmit the bio to
1903 * means the bio should NOT be touched after the call to ->make_request_fn.
1905 void generic_make_request(struct bio *bio) generic_make_request() argument
1909 if (!generic_make_request_checks(bio)) generic_make_request()
1923 bio_list_add(current->bio_list, bio); generic_make_request()
1929 * Before entering the loop, bio->bi_next is NULL (as all callers generic_make_request()
1930 * ensure that) so we have a list with a single bio. generic_make_request()
1937 * from the top. In this case we really did just take the bio generic_make_request()
1941 BUG_ON(bio->bi_next); generic_make_request()
1945 struct request_queue *q = bdev_get_queue(bio->bi_bdev); generic_make_request()
1947 q->make_request_fn(q, bio); generic_make_request()
1949 bio = bio_list_pop(current->bio_list); generic_make_request()
1950 } while (bio); generic_make_request()
1956 * submit_bio - submit a bio to the block device layer for I/O
1958 * @bio: The &struct bio which describes the I/O
1962 * interfaces; @bio must be presetup and ready for I/O.
1965 void submit_bio(int rw, struct bio *bio) submit_bio() argument
1967 bio->bi_rw |= rw; submit_bio()
1973 if (bio_has_data(bio)) { submit_bio()
1977 count = bdev_logical_block_size(bio->bi_bdev) >> 9; submit_bio()
1979 count = bio_sectors(bio); submit_bio()
1984 task_io_account_read(bio->bi_iter.bi_size); submit_bio()
1993 (unsigned long long)bio->bi_iter.bi_sector, submit_bio()
1994 bdevname(bio->bi_bdev, b), submit_bio()
1999 generic_make_request(bio); submit_bio()
2118 struct bio *bio; blk_rq_err_bytes() local
2130 for (bio = rq->bio; bio; bio = bio->bi_next) { blk_rq_err_bytes()
2131 if ((bio->bi_rw & ff) != ff) blk_rq_err_bytes()
2133 bytes += bio->bi_iter.bi_size; blk_rq_err_bytes()
2450 if (!req->bio) blk_update_request()
2454 * For fs requests, rq is just carrier of independent bio's blk_update_request()
2502 while (req->bio) { blk_update_request()
2503 struct bio *bio = req->bio; blk_update_request() local
2504 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); blk_update_request()
2506 if (bio_bytes == bio->bi_iter.bi_size) blk_update_request()
2507 req->bio = bio->bi_next; blk_update_request()
2509 req_bio_endio(req, bio, bio_bytes, error); blk_update_request()
2521 if (!req->bio) { blk_update_request()
2537 /* mixed attributes always follow the first bio */ blk_update_request()
2540 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; blk_update_request()
2845 struct bio *bio) blk_rq_bio_prep()
2847 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ blk_rq_bio_prep()
2848 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; blk_rq_bio_prep()
2850 if (bio_has_data(bio)) blk_rq_bio_prep()
2851 rq->nr_phys_segments = bio_phys_segments(q, bio); blk_rq_bio_prep()
2853 rq->__data_len = bio->bi_iter.bi_size; blk_rq_bio_prep()
2854 rq->bio = rq->biotail = bio; blk_rq_bio_prep()
2856 if (bio->bi_bdev) blk_rq_bio_prep()
2857 rq->rq_disk = bio->bi_bdev->bd_disk; blk_rq_bio_prep()
2916 struct bio *bio; blk_rq_unprep_clone() local
2918 while ((bio = rq->bio) != NULL) { blk_rq_unprep_clone()
2919 rq->bio = bio->bi_next; blk_rq_unprep_clone()
2921 bio_put(bio); blk_rq_unprep_clone()
2947 * @gfp_mask: memory allocation mask for bio
2948 * @bio_ctr: setup function to be called for each clone bio.
2963 int (*bio_ctr)(struct bio *, struct bio *, void *), blk_rq_prep_clone()
2966 struct bio *bio, *bio_src; blk_rq_prep_clone() local
2972 bio = bio_clone_fast(bio_src, gfp_mask, bs); __rq_for_each_bio()
2973 if (!bio) __rq_for_each_bio()
2976 if (bio_ctr && bio_ctr(bio, bio_src, data)) __rq_for_each_bio()
2979 if (rq->bio) { __rq_for_each_bio()
2980 rq->biotail->bi_next = bio; __rq_for_each_bio()
2981 rq->biotail = bio; __rq_for_each_bio()
2983 rq->bio = rq->biotail = bio; __rq_for_each_bio()
2991 if (bio)
2992 bio_put(bio);
3225 * that use request as their IO unit instead of those that use bio's directly.
974 __get_request(struct request_list *rl, int rw_flags, struct bio *bio, gfp_t gfp_mask) __get_request() argument
1151 get_request(struct request_queue *q, int rw_flags, struct bio *bio, gfp_t gfp_mask) get_request() argument
1462 bio_attempt_back_merge(struct request_queue *q, struct request *req, struct bio *bio) bio_attempt_back_merge() argument
1484 bio_attempt_front_merge(struct request_queue *q, struct request *req, struct bio *bio) bio_attempt_front_merge() argument
2844 blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) blk_rq_bio_prep() argument
H A Dblk.h58 void init_request_from_bio(struct request *req, struct bio *bio);
60 struct bio *bio);
62 struct bio *bio);
77 struct bio *bio);
79 struct bio *bio);
80 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
182 struct bio *bio);
184 struct bio *bio);
191 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
192 int blk_try_merge(struct request *rq, struct bio *bio);
270 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
275 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) blk_throtl_bio() argument
H A Dblk-throttle.c10 #include <linux/bio.h>
120 * will unthrottle and is ready to dispatch more bios. It is used as
138 /* Number of bio's dispatched in current slice */
314 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
315 * @bio: bio being added
316 * @qn: qnode to add bio to
319 * Add @bio to @qn and put @qn on @queued if it's not already on.
323 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio() argument
326 bio_list_add(&qn->bios, bio); throtl_qnode_add_bio()
334 * throtl_peek_queued - peek the first bio on a qnode list
337 static struct bio *throtl_peek_queued(struct list_head *queued) throtl_peek_queued()
340 struct bio *bio; throtl_peek_queued() local
345 bio = bio_list_peek(&qn->bios); throtl_peek_queued()
346 WARN_ON_ONCE(!bio); throtl_peek_queued()
347 return bio; throtl_peek_queued()
351 * throtl_pop_queued - pop the first bio from a qnode list
352 * @queued: the qnode list to pop a bio from
355 * Pop the first bio from the qnode list @queued. After popping, the first
364 static struct bio *throtl_pop_queued(struct list_head *queued, throtl_pop_queued()
368 struct bio *bio; throtl_pop_queued() local
373 bio = bio_list_pop(&qn->bios); throtl_pop_queued()
374 WARN_ON_ONCE(!bio); throtl_pop_queued()
386 return bio; throtl_pop_queued()
701 * bio dispatch. That means since start of last slice, we never used throtl_start_new_slice_with_credit()
769 * A bio has been dispatched. Also adjust slice_end. It might happen throtl_trim_slice()
771 * slice_end, but later limit was bumped up and bio was dispatched throtl_trim_slice()
811 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, tg_with_in_iops_limit() argument
814 bool rw = bio_data_dir(bio); tg_with_in_iops_limit()
861 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, tg_with_in_bps_limit() argument
864 bool rw = bio_data_dir(bio); tg_with_in_bps_limit()
880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { tg_with_in_bps_limit()
887 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; tg_with_in_bps_limit()
904 * Returns whether one can dispatch a bio or not. Also returns approx number
905 * of jiffies to wait before this bio is within IO rate and can be dispatched
907 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, tg_may_dispatch() argument
910 bool rw = bio_data_dir(bio); tg_may_dispatch()
914 * Currently whole state machine of group depends on first bio tg_may_dispatch()
915 * queued in the group bio list. So one should not be calling tg_may_dispatch()
916 * this function with a different bio if there are other bios tg_may_dispatch()
920 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); tg_may_dispatch()
941 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && tg_may_dispatch()
942 tg_with_in_iops_limit(tg, bio, &iops_wait)) { tg_may_dispatch()
985 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) throtl_charge_bio() argument
987 bool rw = bio_data_dir(bio); throtl_charge_bio()
989 /* Charge the bio to the group */ throtl_charge_bio()
990 tg->bytes_disp[rw] += bio->bi_iter.bi_size; throtl_charge_bio()
994 * REQ_THROTTLED is used to prevent the same bio to be throttled throtl_charge_bio()
995 * more than once as a throttled bio will go through blk-throtl the throtl_charge_bio()
996 * second time when it eventually gets issued. Set it when a bio throtl_charge_bio()
999 * Dispatch stats aren't recursive and each @bio should only be throtl_charge_bio()
1002 * which is guaranteed to be for the @bio's original tg. throtl_charge_bio()
1004 if (!(bio->bi_rw & REQ_THROTTLED)) { throtl_charge_bio()
1005 bio->bi_rw |= REQ_THROTTLED; throtl_charge_bio()
1007 bio->bi_iter.bi_size, bio->bi_rw); throtl_charge_bio()
1012 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1013 * @bio: bio to add
1017 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
1020 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_add_bio_tg() argument
1024 bool rw = bio_data_dir(bio); throtl_add_bio_tg()
1031 * direction, queueing @bio can change when @tg should be throtl_add_bio_tg()
1038 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); throtl_add_bio_tg()
1048 struct bio *bio; tg_update_disptime() local
1050 if ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_update_disptime()
1051 tg_may_dispatch(tg, bio, &read_wait); tg_update_disptime()
1053 if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_update_disptime()
1054 tg_may_dispatch(tg, bio, &write_wait); tg_update_disptime()
1084 struct bio *bio; tg_dispatch_one_bio() local
1087 * @bio is being transferred from @tg to @parent_sq. Popping a bio tg_dispatch_one_bio()
1090 * after @bio is transferred to @parent_sq. tg_dispatch_one_bio()
1092 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); tg_dispatch_one_bio()
1095 throtl_charge_bio(tg, bio); tg_dispatch_one_bio()
1098 * If our parent is another tg, we just need to transfer @bio to tg_dispatch_one_bio()
1100 * @td->service_queue, @bio is ready to be issued. Put it on its tg_dispatch_one_bio()
1105 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); tg_dispatch_one_bio()
1108 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], tg_dispatch_one_bio()
1126 struct bio *bio; throtl_dispatch_tg() local
1130 while ((bio = throtl_peek_queued(&sq->queued[READ])) && throtl_dispatch_tg()
1131 tg_may_dispatch(tg, bio, NULL)) { throtl_dispatch_tg()
1133 tg_dispatch_one_bio(tg, bio_data_dir(bio)); throtl_dispatch_tg()
1140 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && throtl_dispatch_tg()
1141 tg_may_dispatch(tg, bio, NULL)) { throtl_dispatch_tg()
1143 tg_dispatch_one_bio(tg, bio_data_dir(bio)); throtl_dispatch_tg()
1185 * This timer is armed when a child throtl_grp with active bio's become
1188 * dispatches bio's from the children throtl_grps to the parent
1194 * kicked so that the ready bio's are issued.
1257 * This function is queued for execution when bio's reach the bio_lists[]
1258 * of throtl_data->service_queue. Those bio's are ready and issued by this
1268 struct bio *bio; blk_throtl_dispatch_work_fn() local
1276 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) blk_throtl_dispatch_work_fn()
1277 bio_list_add(&bio_list_on_stack, bio); blk_throtl_dispatch_work_fn()
1282 while((bio = bio_list_pop(&bio_list_on_stack))) blk_throtl_dispatch_work_fn()
1283 generic_make_request(bio); blk_throtl_dispatch_work_fn()
1480 bool blk_throtl_bio(struct request_queue *q, struct bio *bio) blk_throtl_bio() argument
1486 bool rw = bio_data_dir(bio); blk_throtl_bio()
1491 if (bio->bi_rw & REQ_THROTTLED) blk_throtl_bio()
1500 blkcg = bio_blkcg(bio); blk_throtl_bio()
1505 bio->bi_iter.bi_size, bio->bi_rw); blk_throtl_bio()
1527 if (!tg_may_dispatch(tg, bio, NULL)) blk_throtl_bio()
1531 throtl_charge_bio(tg, bio); blk_throtl_bio()
1535 * otherwise it might happen that a bio is not queued for blk_throtl_bio()
1542 * So keep on trimming slice even if bio is not queued. blk_throtl_bio()
1547 * @bio passed through this layer without being throttled. blk_throtl_bio()
1559 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", blk_throtl_bio()
1561 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], blk_throtl_bio()
1565 bio_associate_current(bio); blk_throtl_bio()
1567 throtl_add_bio_tg(bio, qn, tg); blk_throtl_bio()
1572 * was empty before @bio. The forced scheduling isn't likely to blk_throtl_bio()
1573 * cause undue delay as @bio is likely to be dispatched directly if blk_throtl_bio()
1592 bio->bi_rw &= ~REQ_THROTTLED; blk_throtl_bio()
1607 struct bio *bio; tg_drain_bios() local
1611 while ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_drain_bios()
1612 tg_dispatch_one_bio(tg, bio_data_dir(bio)); tg_drain_bios()
1613 while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_drain_bios()
1614 tg_dispatch_one_bio(tg, bio_data_dir(bio)); tg_drain_bios()
1630 struct bio *bio; variable in typeref:struct:bio
1653 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1655 generic_make_request(bio); variable
H A Dblk-integrity.c25 #include <linux/bio.h>
39 * @bio: bio with integrity metadata attached
42 * scatterlist corresponding to the integrity metadata in a bio.
44 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) blk_rq_count_integrity_sg() argument
52 bio_for_each_integrity_vec(iv, bio, iter) { bio_for_each_integrity_vec()
82 * @bio: bio with integrity metadata attached
89 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, blk_rq_map_integrity_sg() argument
98 bio_for_each_integrity_vec(iv, bio, iter) { bio_for_each_integrity_vec()
198 if (bio_integrity(req->bio)->bip_flags != blk_integrity_merge_rq()
199 bio_integrity(next->bio)->bip_flags) blk_integrity_merge_rq()
211 struct bio *bio) blk_integrity_merge_bio()
214 struct bio *next = bio->bi_next; blk_integrity_merge_bio()
216 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) blk_integrity_merge_bio()
219 if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL) blk_integrity_merge_bio()
222 if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags) blk_integrity_merge_bio()
225 bio->bi_next = NULL; blk_integrity_merge_bio()
226 nr_integrity_segs = blk_rq_count_integrity_sg(q, bio); blk_integrity_merge_bio()
227 bio->bi_next = next; blk_integrity_merge_bio()
210 blk_integrity_merge_bio(struct request_queue *q, struct request *req, struct bio *bio) blk_integrity_merge_bio() argument
H A Dblk-flush.c57 * complete. The first completion updates the contained bio but doesn't
58 * finish it so that the bio submitter is notified only after the whole
63 * bio attached to it, which is guaranteed as they aren't allowed to be
69 #include <linux/bio.h>
121 * After flush data completion, @rq->bio is %NULL but we need to blk_flush_restore_request()
122 * complete the bio again. @rq->biotail is guaranteed to equal the blk_flush_restore_request()
123 * original @rq->bio. Restore it. blk_flush_restore_request()
125 rq->bio = rq->biotail; blk_flush_restore_request()
400 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ blk_insert_flush()
453 struct bio *bio; blkdev_issue_flush() local
472 bio = bio_alloc(gfp_mask, 0); blkdev_issue_flush()
473 bio->bi_bdev = bdev; blkdev_issue_flush()
475 ret = submit_bio_wait(WRITE_FLUSH, bio); blkdev_issue_flush()
483 *error_sector = bio->bi_iter.bi_sector; blkdev_issue_flush()
485 bio_put(bio); blkdev_issue_flush()
523 /* bio based request queue hasn't flush queue */ blk_free_flush_queue()
elevator.c
20 * - Rework again to work with bio instead of buffer_heads
29 #include <linux/bio.h>
53 * Query io scheduler to see if the current process issuing bio may be
56 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) elv_iosched_allow_merge() argument
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); elv_iosched_allow_merge()
70 bool elv_rq_merge_ok(struct request *rq, struct bio *bio) elv_rq_merge_ok() argument
72 if (!blk_rq_merge_ok(rq, bio)) elv_rq_merge_ok()
75 if (!elv_iosched_allow_merge(rq, bio)) elv_rq_merge_ok()
411 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) elv_merge() argument
429 if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) { elv_merge()
430 ret = blk_try_merge(q->last_merge, bio); elv_merge()
443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); elv_merge()
444 if (__rq && elv_rq_merge_ok(__rq, bio)) { elv_merge()
450 return e->type->ops.elevator_merge_fn(q, req, bio); elv_merge()
530 struct bio *bio) elv_bio_merged()
535 e->type->ops.elevator_bio_merged_fn(q, rq, bio); elv_bio_merged()
703 struct bio *bio, gfp_t gfp_mask) elv_set_request()
708 return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask); elv_set_request()
529 elv_bio_merged(struct request_queue *q, struct request *rq, struct bio *bio) elv_bio_merged() argument
702 elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask) elv_set_request() argument
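For reference, elv_iosched_allow_merge() above simply forwards to the scheduler's elevator_allow_merge_fn hook, and the hook's return value decides whether the bio may be merged into rq. A minimal sketch of such a hook follows; the function name and policy are invented for illustration (cfq_allow_merge() further down in this listing is a real-world example of the same hook):

/*
 * Hypothetical allow_merge hook, sketched against the call site above:
 * elv_iosched_allow_merge() returns this value, so returning 0 vetoes
 * the merge.  Here we only allow merging bios whose data direction
 * matches the request's.
 */
static int example_allow_merge(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;
	return 1;
}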
blk-mq.c
10 #include <linux/bio.h>
694 struct blk_mq_ctx *ctx, struct bio *bio) blk_mq_attempt_merge()
705 if (!blk_rq_merge_ok(rq, bio)) blk_mq_attempt_merge()
708 el_ret = blk_try_merge(rq, bio); blk_mq_attempt_merge()
710 if (bio_attempt_back_merge(q, rq, bio)) { blk_mq_attempt_merge()
716 if (bio_attempt_front_merge(q, rq, bio)) { blk_mq_attempt_merge()
1150 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) blk_mq_bio_to_request() argument
1152 init_request_from_bio(rq, bio); blk_mq_bio_to_request()
1166 struct request *rq, struct bio *bio) blk_mq_merge_queue_io()
1169 blk_mq_bio_to_request(rq, bio); blk_mq_merge_queue_io()
1179 if (!blk_mq_attempt_merge(q, ctx, bio)) { blk_mq_merge_queue_io()
1180 blk_mq_bio_to_request(rq, bio); blk_mq_merge_queue_io()
1196 struct bio *bio, blk_mq_map_request()
1202 int rw = bio_data_dir(bio); blk_mq_map_request()
1206 bio_endio(bio, -EIO); blk_mq_map_request()
1213 if (rw_is_sync(bio->bi_rw)) blk_mq_map_request()
1216 trace_block_getrq(q, bio, rw); blk_mq_map_request()
1223 trace_block_sleeprq(q, bio, rw); blk_mq_map_request()
1245 static void blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_make_request() argument
1247 const int is_sync = rw_is_sync(bio->bi_rw); blk_mq_make_request()
1248 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); blk_mq_make_request()
1252 blk_queue_bounce(q, &bio); blk_mq_make_request()
1254 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { blk_mq_make_request()
1255 bio_endio(bio, -EIO); blk_mq_make_request()
1259 rq = blk_mq_map_request(q, bio, &data); blk_mq_make_request()
1264 blk_mq_bio_to_request(rq, bio); blk_mq_make_request()
1282 blk_mq_bio_to_request(rq, bio); blk_mq_make_request()
1303 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { blk_mq_make_request()
1321 static void blk_sq_make_request(struct request_queue *q, struct bio *bio) blk_sq_make_request() argument
1323 const int is_sync = rw_is_sync(bio->bi_rw); blk_sq_make_request()
1324 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); blk_sq_make_request()
1335 blk_queue_bounce(q, &bio); blk_sq_make_request()
1337 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { blk_sq_make_request()
1338 bio_endio(bio, -EIO); blk_sq_make_request()
1343 blk_attempt_plug_merge(q, bio, &request_count)) blk_sq_make_request()
1346 rq = blk_mq_map_request(q, bio, &data); blk_sq_make_request()
1351 blk_mq_bio_to_request(rq, bio); blk_sq_make_request()
1365 blk_mq_bio_to_request(rq, bio); blk_sq_make_request()
1378 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { blk_sq_make_request()
693 blk_mq_attempt_merge(struct request_queue *q, struct blk_mq_ctx *ctx, struct bio *bio) blk_mq_attempt_merge() argument
1164 blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq, struct bio *bio) blk_mq_merge_queue_io() argument
1195 blk_mq_map_request(struct request_queue *q, struct bio *bio, struct blk_map_ctx *data) blk_mq_map_request() argument
blk-cgroup.h
190 static inline struct blkcg *bio_blkcg(struct bio *bio) bio_blkcg() argument
192 if (bio && bio->bi_css) bio_blkcg()
193 return css_to_blkcg(bio->bi_css); bio_blkcg()
317 * @bio: bio which will be attached to the allocated request (may be %NULL)
319 * The caller wants to allocate a request from @q to use for @bio. Find
325 struct bio *bio) blk_get_rl()
332 blkcg = bio_blkcg(bio); blk_get_rl()
584 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } bio_blkcg() argument
594 struct bio *bio) { return &q->root_rl; } blk_put_rl()
324 blk_get_rl(struct request_queue *q, struct bio *bio) blk_get_rl() argument
593 blk_get_rl(struct request_queue *q, struct bio *bio) blk_get_rl() argument
bsg.c
84 struct bio *bio; member in struct:bsg_command
85 struct bio *bidi_bio;
292 blk_rq_unmap_user(next_rq->bio); bsg_map_hdr()
308 dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", bsg_rq_end_io()
309 bd->name, rq, bc, bc->bio, uptodate); bsg_rq_end_io()
334 bc->bio = rq->bio; bsg_add_command()
336 bc->bidi_bio = rq->next_rq->bio; bsg_add_command()
394 struct bio *bio, struct bio *bidi_bio) blk_complete_sgv4_hdr_rq()
398 dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); blk_complete_sgv4_hdr_rq()
441 blk_rq_unmap_user(bio); blk_complete_sgv4_hdr_rq()
501 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, bsg_complete_all_commands()
536 ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, __bsg_read()
908 struct bio *bio, *bidi_bio = NULL; bsg_ioctl() local
920 bio = rq->bio; bsg_ioctl()
922 bidi_bio = rq->next_rq->bio; bsg_ioctl()
926 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); bsg_ioctl()
393 blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, struct bio *bio, struct bio *bidi_bio) blk_complete_sgv4_hdr_rq() argument
deadline-iosched.c
10 #include <linux/bio.h>
125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) deadline_merge() argument
135 sector_t sector = bio_end_sector(bio); deadline_merge()
137 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); deadline_merge()
141 if (elv_rq_merge_ok(__rq, bio)) { deadline_merge()
scsi_ioctl.c
252 struct bio *bio) blk_complete_sghdr_rq()
279 r = blk_rq_unmap_user(bio); blk_complete_sghdr_rq()
295 struct bio *bio; sg_io() local
356 bio = rq->bio; sg_io()
372 ret = blk_complete_sghdr_rq(rq, hdr, bio); sg_io()
251 blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, struct bio *bio) blk_complete_sghdr_rq() argument
noop-iosched.c
6 #include <linux/bio.h>
cfq-iosched.c
861 struct cfq_io_cq *cic, struct bio *bio,
898 static inline bool cfq_bio_sync(struct bio *bio) cfq_bio_sync() argument
900 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); cfq_bio_sync()
2282 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) cfq_find_rq_fmerge() argument
2292 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); cfq_find_rq_fmerge()
2294 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); cfq_find_rq_fmerge()
2339 struct bio *bio) cfq_merge()
2344 __rq = cfq_find_rq_fmerge(cfqd, bio); cfq_merge()
2345 if (__rq && elv_rq_merge_ok(__rq, bio)) { cfq_merge()
2364 struct bio *bio) cfq_bio_merged()
2366 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); cfq_bio_merged()
2403 struct bio *bio) cfq_allow_merge()
2410 * Disallow merge of a sync bio into an async request. cfq_allow_merge()
2412 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) cfq_allow_merge()
2416 * Lookup the cfqq that this bio will be queued with and allow cfq_allow_merge()
2423 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); cfq_allow_merge()
3494 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio) check_ioprio_changed() argument
3510 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, check_ioprio_changed()
3546 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) check_blkcg_changed() argument
3553 serial_nr = bio_blkcg(bio)->css.serial_nr; check_blkcg_changed()
3577 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } check_blkcg_changed() argument
3582 struct bio *bio, gfp_t gfp_mask) cfq_find_alloc_queue()
3591 blkcg = bio_blkcg(bio); cfq_find_alloc_queue()
3662 struct bio *bio, gfp_t gfp_mask) cfq_get_queue()
3680 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); cfq_get_queue()
4215 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio, cfq_set_request() argument
4228 check_ioprio_changed(cic, bio); cfq_set_request()
4229 check_blkcg_changed(cic, bio); cfq_set_request()
4233 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); cfq_set_request()
2338 cfq_merge(struct request_queue *q, struct request **req, struct bio *bio) cfq_merge() argument
2363 cfq_bio_merged(struct request_queue *q, struct request *req, struct bio *bio) cfq_bio_merged() argument
2402 cfq_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) cfq_allow_merge() argument
3581 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct bio *bio, gfp_t gfp_mask) cfq_find_alloc_queue() argument
3661 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct bio *bio, gfp_t gfp_mask) cfq_get_queue() argument
blk-exec.c
6 #include <linux/bio.h>
bsg-lib.c
132 if (req->bio) { bsg_create_job()
137 if (rsp && rsp->bio) { bsg_create_job()
blk-iopoll.c
8 #include <linux/bio.h>
blk-softirq.c
7 #include <linux/bio.h>
blk-settings.c
7 #include <linux/bio.h>
64 * add a new bio_vec to a bio at a given offset or not. If the block device
66 * the size of bio's sent to it. Note that a block device *must* allow a
67 * single page to be added to an empty bio. The block device driver may want
68 * to use the bio_split() function to deal with these bio's. By default
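The blk-settings.c comment above describes the merge_bvec_fn contract: a driver may cap how much a bio is allowed to grow, but must always accept at least a single page into an empty bio, and can rely on bio_split() for bios that still end up too large. A hedged sketch of such a callback, assuming a hypothetical device with a 64KB I/O boundary and the v4.1 struct bvec_merge_data layout (bi_sector/bi_size); the function name and the limit are invented:

/*
 * Hypothetical merge_bvec_fn for a device that cannot handle I/O crossing
 * a 64KB boundary.  The return value is how many more bytes the bio may
 * grow by; per the comment above, an empty bio must always be allowed to
 * take at least one page, even if that violates the boundary.
 */
static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec)
{
	sector_t offset = (bvm->bi_sector << 9) + bvm->bi_size;
	unsigned int max = (64 * 1024) - (offset & (64 * 1024 - 1));

	if (max < biovec->bv_len && !bvm->bi_size)
		return biovec->bv_len;	/* empty bio: accept one page anyway */

	return max;
}

A driver with such a limit would register the callback with blk_queue_merge_bvec() and, as the comment notes, could still split oversized bios itself with bio_split().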
/linux-4.1.27/drivers/md/
dm-bio-record.h
10 #include <linux/bio.h>
13 * There are lots of mutable fields in the bio struct that get
15 * such as multipath, may wish to resubmit a bio on error. The
17 * original bio state.
26 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) dm_bio_record() argument
28 bd->bi_bdev = bio->bi_bdev; dm_bio_record()
29 bd->bi_flags = bio->bi_flags; dm_bio_record()
30 bd->bi_iter = bio->bi_iter; dm_bio_record()
33 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) dm_bio_restore() argument
35 bio->bi_bdev = bd->bi_bdev; dm_bio_restore()
36 bio->bi_flags = bd->bi_flags; dm_bio_restore()
37 bio->bi_iter = bd->bi_iter; dm_bio_restore()
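dm_bio_record()/dm_bio_restore() above snapshot and restore the mutable bio fields (bi_bdev, bi_flags, bi_iter) so a target can resubmit a bio after a remapped I/O fails. A sketch of the usual record-in-map, restore-in-end_io pattern, assuming a hypothetical target with its own per-bio data; dm-raid1.c below uses this same pattern via mirror_map()/mirror_end_io():

#include <linux/device-mapper.h>
#include "dm-bio-record.h"

/* Hypothetical per-bio data: just the recorded bio details. */
struct example_per_bio_data {
	struct dm_bio_details details;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_per_bio_data *pb = dm_per_bio_data(bio, sizeof(*pb));

	dm_bio_record(&pb->details, bio);	/* snapshot bi_bdev/bi_flags/bi_iter */
	/* ... remap bio->bi_bdev and bio->bi_iter.bi_sector to a device here ... */
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct example_per_bio_data *pb = dm_per_bio_data(bio, sizeof(*pb));

	if (error) {
		dm_bio_restore(&pb->details, bio);	/* undo the remap */
		/* ... requeue the pristine bio to an alternative device ... */
	}
	return error;
}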
multipath.h
26 struct bio *master_bio;
27 struct bio bio; member in struct:multipath_bh
dm-raid1.c
8 #include "dm-bio-record.h"
119 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) queue_bio() argument
128 bio_list_add(bl, bio); queue_bio()
138 struct bio *bio; dispatch_bios() local
140 while ((bio = bio_list_pop(bio_list))) dispatch_bios()
141 queue_bio(ms, bio, WRITE); dispatch_bios()
161 static struct mirror *bio_get_m(struct bio *bio) bio_get_m() argument
163 return (struct mirror *) bio->bi_next; bio_get_m()
166 static void bio_set_m(struct bio *bio, struct mirror *m) bio_set_m() argument
168 bio->bi_next = (struct bio *) m; bio_set_m()
429 static int mirror_available(struct mirror_set *ms, struct bio *bio) mirror_available() argument
432 region_t region = dm_rh_bio_to_region(ms->rh, bio); mirror_available()
435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; mirror_available()
443 static sector_t map_sector(struct mirror *m, struct bio *bio) map_sector() argument
445 if (unlikely(!bio->bi_iter.bi_size)) map_sector()
447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); map_sector()
450 static void map_bio(struct mirror *m, struct bio *bio) map_bio() argument
452 bio->bi_bdev = m->dev->bdev; map_bio()
453 bio->bi_iter.bi_sector = map_sector(m, bio); map_bio()
457 struct bio *bio) map_region()
460 io->sector = map_sector(m, bio); map_region()
461 io->count = bio_sectors(bio); map_region()
464 static void hold_bio(struct mirror_set *ms, struct bio *bio) hold_bio() argument
476 * If device is suspended, complete the bio. hold_bio()
479 bio_endio(bio, DM_ENDIO_REQUEUE); hold_bio()
481 bio_endio(bio, -EIO); hold_bio()
486 * Hold bio until the suspend is complete. hold_bio()
488 bio_list_add(&ms->holds, bio); hold_bio()
497 struct bio *bio = context; read_callback() local
500 m = bio_get_m(bio); read_callback()
501 bio_set_m(bio, NULL); read_callback()
504 bio_endio(bio, 0); read_callback()
510 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { read_callback()
514 queue_bio(m->ms, bio, bio_rw(bio)); read_callback()
520 bio_endio(bio, -EIO); read_callback()
524 static void read_async_bio(struct mirror *m, struct bio *bio) read_async_bio() argument
530 .mem.ptr.bio = bio, read_async_bio()
532 .notify.context = bio, read_async_bio()
536 map_region(&io, m, bio); read_async_bio()
537 bio_set_m(bio, m); read_async_bio()
551 struct bio *bio; do_reads() local
554 while ((bio = bio_list_pop(reads))) { do_reads()
555 region = dm_rh_bio_to_region(ms->rh, bio); do_reads()
562 m = choose_mirror(ms, bio->bi_iter.bi_sector); do_reads()
567 read_async_bio(m, bio); do_reads()
569 bio_endio(bio, -EIO); do_reads()
588 struct bio *bio = (struct bio *) context; write_callback() local
593 ms = bio_get_m(bio)->ms; write_callback()
594 bio_set_m(bio, NULL); write_callback()
603 bio_endio(bio, ret); write_callback()
608 * If the bio is discard, return an error, but do not write_callback()
611 if (bio->bi_rw & REQ_DISCARD) { write_callback()
612 bio_endio(bio, -EOPNOTSUPP); write_callback()
628 bio_list_add(&ms->failures, bio); write_callback()
634 static void do_write(struct mirror_set *ms, struct bio *bio) do_write() argument
640 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), do_write()
642 .mem.ptr.bio = bio, do_write()
644 .notify.context = bio, do_write()
648 if (bio->bi_rw & REQ_DISCARD) { do_write()
655 map_region(dest++, m, bio); do_write()
661 bio_set_m(bio, get_default_mirror(ms)); do_write()
669 struct bio *bio; do_writes() local
686 while ((bio = bio_list_pop(writes))) { do_writes()
687 if ((bio->bi_rw & REQ_FLUSH) || do_writes()
688 (bio->bi_rw & REQ_DISCARD)) { do_writes()
689 bio_list_add(&sync, bio); do_writes()
693 region = dm_rh_bio_to_region(ms->rh, bio); do_writes()
697 bio_list_add(&requeue, bio); do_writes()
717 bio_list_add(this_list, bio); do_writes()
755 while ((bio = bio_list_pop(&sync))) do_writes()
756 do_write(ms, bio); do_writes()
758 while ((bio = bio_list_pop(&recover))) do_writes()
759 dm_rh_delay(ms->rh, bio); do_writes()
761 while ((bio = bio_list_pop(&nosync))) { do_writes()
764 bio_list_add(&ms->failures, bio); do_writes()
768 map_bio(get_default_mirror(ms), bio); do_writes() local
769 generic_make_request(bio); do_writes()
776 struct bio *bio; do_failures() local
798 while ((bio = bio_list_pop(failures))) { do_failures()
801 dm_rh_mark_nosync(ms->rh, bio); do_failures()
806 * If we have been told to handle errors, hold the bio do_failures()
813 bio_endio(bio, -EIO); do_failures()
815 hold_bio(ms, bio); do_failures()
817 bio_endio(bio, 0); do_failures()
1156 static int mirror_map(struct dm_target *ti, struct bio *bio) mirror_map() argument
1158 int r, rw = bio_rw(bio); mirror_map()
1163 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); mirror_map()
1169 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); mirror_map()
1170 queue_bio(ms, bio, rw); mirror_map()
1174 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); mirror_map()
1179 * If region is not in-sync queue the bio. mirror_map()
1185 queue_bio(ms, bio, rw); mirror_map()
1193 m = choose_mirror(ms, bio->bi_iter.bi_sector); mirror_map()
1197 dm_bio_record(&bio_record->details, bio); mirror_map()
1200 map_bio(m, bio); mirror_map()
1205 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) mirror_end_io() argument
1207 int rw = bio_rw(bio); mirror_end_io()
1212 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); mirror_end_io()
1218 if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) mirror_end_io()
1226 if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) mirror_end_io()
1251 if (default_ok(m) || mirror_available(ms, bio)) { mirror_end_io()
1254 dm_bio_restore(bd, bio); mirror_end_io()
1257 atomic_inc(&bio->bi_remaining); mirror_end_io()
1259 queue_bio(ms, bio, rw); mirror_end_io()
1277 struct bio *bio; mirror_presuspend() local
1283 * for bios in the hold list. After the process, no bio has mirror_presuspend()
1292 while ((bio = bio_list_pop(&holds))) mirror_presuspend()
1293 hold_bio(ms, bio); mirror_presuspend()
456 map_region(struct dm_io_region *io, struct mirror *m, struct bio *bio) map_region() argument
dm-delay.c
13 #include <linux/bio.h>
64 static void flush_bios(struct bio *bio) flush_bios() argument
66 struct bio *n; flush_bios()
68 while (bio) { flush_bios()
69 n = bio->bi_next; flush_bios()
70 bio->bi_next = NULL; flush_bios()
71 generic_make_request(bio); flush_bios()
72 bio = n; flush_bios()
76 static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) flush_delayed_bios()
86 struct bio *bio = dm_bio_from_per_bio_data(delayed, flush_delayed_bios() local
89 bio_list_add(&flush_bios, bio); flush_delayed_bios()
90 if ((bio_data_dir(bio) == WRITE)) flush_delayed_bios()
228 static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) delay_bio() argument
236 delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info)); delay_bio()
243 if (bio_data_dir(bio) == WRITE) delay_bio()
273 static int delay_map(struct dm_target *ti, struct bio *bio) delay_map() argument
277 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { delay_map()
278 bio->bi_bdev = dc->dev_write->bdev; delay_map()
279 if (bio_sectors(bio)) delay_map()
280 bio->bi_iter.bi_sector = dc->start_write + delay_map()
281 dm_target_offset(ti, bio->bi_iter.bi_sector); delay_map()
283 return delay_bio(dc, dc->write_delay, bio); delay_map()
286 bio->bi_bdev = dc->dev_read->bdev; delay_map()
287 bio->bi_iter.bi_sector = dc->start_read + delay_map()
288 dm_target_offset(ti, bio->bi_iter.bi_sector); delay_map()
290 return delay_bio(dc, dc->read_delay, bio); delay_map()
dm-log-writes.c
12 #include <linux/bio.h>
149 static void log_end_io(struct bio *bio, int err) log_end_io() argument
151 struct log_writes_c *lc = bio->bi_private; log_end_io()
164 bio_for_each_segment_all(bvec, bio, i) log_end_io()
168 bio_put(bio); log_end_io()
193 struct bio *bio; write_metadata() local
198 bio = bio_alloc(GFP_KERNEL, 1); write_metadata()
199 if (!bio) { write_metadata()
200 DMERR("Couldn't alloc log bio"); write_metadata()
203 bio->bi_iter.bi_size = 0; write_metadata()
204 bio->bi_iter.bi_sector = sector; write_metadata()
205 bio->bi_bdev = lc->logdev->bdev; write_metadata()
206 bio->bi_end_io = log_end_io; write_metadata()
207 bio->bi_private = lc; write_metadata()
208 set_bit(BIO_UPTODATE, &bio->bi_flags); write_metadata()
213 bio_put(bio); write_metadata()
225 ret = bio_add_page(bio, page, lc->sectorsize, 0); write_metadata()
230 submit_bio(WRITE, bio); write_metadata()
233 bio_put(bio); write_metadata()
243 struct bio *bio; log_one_block() local
262 bio = bio_alloc(GFP_KERNEL, block->vec_cnt); log_one_block()
263 if (!bio) { log_one_block()
264 DMERR("Couldn't alloc log bio"); log_one_block()
268 bio->bi_iter.bi_size = 0; log_one_block()
269 bio->bi_iter.bi_sector = sector; log_one_block()
270 bio->bi_bdev = lc->logdev->bdev; log_one_block()
271 bio->bi_end_io = log_end_io; log_one_block()
272 bio->bi_private = lc; log_one_block()
273 set_bit(BIO_UPTODATE, &bio->bi_flags); log_one_block()
278 * for every bvec in the original bio for simplicity sake. log_one_block()
280 ret = bio_add_page(bio, block->vecs[i].bv_page, log_one_block()
284 submit_bio(WRITE, bio); log_one_block()
285 bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); log_one_block()
286 if (!bio) { log_one_block()
287 DMERR("Couldn't alloc log bio"); log_one_block()
290 bio->bi_iter.bi_size = 0; log_one_block()
291 bio->bi_iter.bi_sector = sector; log_one_block()
292 bio->bi_bdev = lc->logdev->bdev; log_one_block()
293 bio->bi_end_io = log_end_io; log_one_block()
294 bio->bi_private = lc; log_one_block()
295 set_bit(BIO_UPTODATE, &bio->bi_flags); log_one_block()
297 ret = bio_add_page(bio, block->vecs[i].bv_page, log_one_block()
300 DMERR("Couldn't add page on new bio?"); log_one_block()
301 bio_put(bio); log_one_block()
307 submit_bio(WRITE, bio); log_one_block()
537 static void normal_map_bio(struct dm_target *ti, struct bio *bio) normal_map_bio() argument
541 bio->bi_bdev = lc->dev->bdev; normal_map_bio()
544 static int log_writes_map(struct dm_target *ti, struct bio *bio) log_writes_map() argument
547 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); log_writes_map()
553 bool flush_bio = (bio->bi_rw & REQ_FLUSH); log_writes_map()
554 bool fua_bio = (bio->bi_rw & REQ_FUA); log_writes_map()
555 bool discard_bio = (bio->bi_rw & REQ_DISCARD); log_writes_map()
566 if (bio_data_dir(bio) == READ) log_writes_map()
570 if (!bio_sectors(bio) && !flush_bio) log_writes_map()
580 alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio); log_writes_map()
601 block->sector = bio->bi_iter.bi_sector; log_writes_map()
602 block->nr_sectors = bio_sectors(bio); log_writes_map()
609 bio_endio(bio, 0); log_writes_map()
613 /* Flush bio, splice the unflushed blocks onto this list and submit */ log_writes_map()
614 if (flush_bio && !bio_sectors(bio)) { log_writes_map()
622 * We will write this bio somewhere else way later so we need to copy log_writes_map()
626 * We do this because this could be a bio from O_DIRECT in which case we log_writes_map()
630 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
662 normal_map_bio(ti, bio);
666 static int normal_end_io(struct dm_target *ti, struct bio *bio, int error) normal_end_io() argument
669 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); normal_end_io()
671 if (bio_data_dir(bio) == WRITE && pb->block) { normal_end_io()
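write_metadata() and log_one_block() above follow the standard v4.1 pattern for driver-issued I/O: allocate a bio, point it at a device and sector, add pages, submit. A condensed sketch of that pattern in synchronous form, assuming submit_bio_wait() is acceptable in the caller's context; the helper name is invented:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: synchronously write one page at @sector (v4.1 API). */
static int example_write_page_sync(struct block_device *bdev, sector_t sector,
				   struct page *page, unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	if (!bio_add_page(bio, page, len, 0)) {	/* returns bytes added, 0 on failure */
		bio_put(bio);
		return -EIO;
	}

	ret = submit_bio_wait(WRITE, bio);	/* rw flags are a separate argument in v4.1 */
	bio_put(bio);
	return ret;
}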
dm-flakey.c
13 #include <linux/bio.h>
18 #define all_corrupt_bio_flags_match(bio, fc) \
19 (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
55 {1, UINT_MAX, "Invalid corrupt bio byte"}, parse_features()
56 {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, parse_features()
57 {0, UINT_MAX, "Invalid corrupt bio flags mask"}, parse_features()
107 ti->error = "Invalid corrupt bio direction (r or w)"; parse_features()
245 static void flakey_map_bio(struct dm_target *ti, struct bio *bio) flakey_map_bio() argument
249 bio->bi_bdev = fc->dev->bdev; flakey_map_bio()
250 if (bio_sectors(bio)) flakey_map_bio()
251 bio->bi_iter.bi_sector = flakey_map_bio()
252 flakey_map_sector(ti, bio->bi_iter.bi_sector); flakey_map_bio()
255 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) corrupt_bio_data() argument
257 unsigned bio_bytes = bio_cur_bytes(bio); corrupt_bio_data()
258 char *data = bio_data(bio); corrupt_bio_data()
266 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " corrupt_bio_data()
268 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, corrupt_bio_data()
269 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, corrupt_bio_data()
270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); corrupt_bio_data()
274 static int flakey_map(struct dm_target *ti, struct bio *bio) flakey_map() argument
278 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); flakey_map()
285 * Flag this bio as submitted while down. flakey_map()
292 if (bio_data_dir(bio) == READ) flakey_map()
299 bio_endio(bio, 0); flakey_map()
307 if (all_corrupt_bio_flags_match(bio, fc)) flakey_map()
308 corrupt_bio_data(bio, fc); flakey_map()
319 flakey_map_bio(ti, bio); flakey_map()
324 static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) flakey_end_io() argument
327 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); flakey_end_io()
334 (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && flakey_end_io()
335 all_corrupt_bio_flags_match(bio, fc)) flakey_end_io()
336 corrupt_bio_data(bio, fc); flakey_end_io()
dm-zero.c
11 #include <linux/bio.h>
36 static int zero_map(struct dm_target *ti, struct bio *bio) zero_map() argument
38 switch(bio_rw(bio)) { zero_map()
40 zero_fill_bio(bio); zero_map()
50 bio_endio(bio, 0); zero_map()
52 /* accepted bio, don't make new request */ zero_map()
faulty.c
44 * we clone the bio and insert a new b_end_io into the chain.
73 static void faulty_fail(struct bio *bio, int error) faulty_fail() argument
75 struct bio *b = bio->bi_private; faulty_fail()
77 b->bi_iter.bi_size = bio->bi_iter.bi_size; faulty_fail()
78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; faulty_fail()
80 bio_put(bio); faulty_fail()
173 static void make_request(struct mddev *mddev, struct bio *bio) make_request() argument
178 if (bio_data_dir(bio) == WRITE) { make_request()
184 bio_endio(bio, -EIO); make_request()
188 if (check_sector(conf, bio->bi_iter.bi_sector, make_request()
189 bio_end_sector(bio), WRITE)) make_request()
192 add_sector(conf, bio->bi_iter.bi_sector, make_request()
200 if (check_sector(conf, bio->bi_iter.bi_sector, make_request()
201 bio_end_sector(bio), READ)) make_request()
206 add_sector(conf, bio->bi_iter.bi_sector, make_request()
211 add_sector(conf, bio->bi_iter.bi_sector, make_request()
217 struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); make_request()
220 b->bi_private = bio; make_request()
222 bio = b; make_request()
224 bio->bi_bdev = conf->rdev->bdev; make_request()
226 generic_make_request(bio); make_request()
raid1.c
51 * correct the read error. To keep track of bad blocks on a per-bio
54 #define IO_BLOCKED ((struct bio *)1)
57 * the success by setting devs[n].bio to IO_MADE_GOOD
59 #define IO_MADE_GOOD ((struct bio *)2)
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
99 struct bio *bio; r1buf_pool_alloc() local
111 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); r1buf_pool_alloc()
112 if (!bio) r1buf_pool_alloc()
114 r1_bio->bios[j] = bio; r1buf_pool_alloc()
118 * the first bio. r1buf_pool_alloc()
120 * RESYNC_PAGES for each bio. r1buf_pool_alloc()
127 bio = r1_bio->bios[j]; r1buf_pool_alloc()
128 bio->bi_vcnt = RESYNC_PAGES; r1buf_pool_alloc()
130 if (bio_alloc_pages(bio, gfp_flags)) r1buf_pool_alloc()
184 struct bio **bio = r1_bio->bios + i; put_all_bios() local
185 if (!BIO_SPECIAL(*bio)) put_all_bios()
186 bio_put(*bio); put_all_bios()
187 *bio = NULL; put_all_bios()
205 struct bio *bio = r1_bio->bios[i]; put_buf() local
206 if (bio->bi_end_io) put_buf()
237 struct bio *bio = r1_bio->master_bio; call_bio_endio() local
241 sector_t bi_sector = bio->bi_iter.bi_sector; call_bio_endio()
243 if (bio->bi_phys_segments) { call_bio_endio()
246 bio->bi_phys_segments--; call_bio_endio()
247 done = (bio->bi_phys_segments == 0); call_bio_endio()
258 clear_bit(BIO_UPTODATE, &bio->bi_flags); call_bio_endio()
260 bio_endio(bio, 0); call_bio_endio()
271 struct bio *bio = r1_bio->master_bio; raid_end_bio_io() local
276 (bio_data_dir(bio) == WRITE) ? "write" : "read", raid_end_bio_io()
277 (unsigned long long) bio->bi_iter.bi_sector, raid_end_bio_io()
278 (unsigned long long) bio_end_sector(bio) - 1); raid_end_bio_io()
297 * Find the disk number which triggered given bio
299 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) find_bio_disk() argument
306 if (r1_bio->bios[mirror] == bio) find_bio_disk()
315 static void raid1_end_read_request(struct bio *bio, int error) raid1_end_read_request() argument
317 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); raid1_end_read_request()
318 struct r1bio *r1_bio = bio->bi_private; raid1_end_read_request()
400 static void raid1_end_write_request(struct bio *bio, int error) raid1_end_write_request() argument
402 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); raid1_end_write_request()
403 struct r1bio *r1_bio = bio->bi_private; raid1_end_write_request()
406 struct bio *to_put = NULL; raid1_end_write_request()
408 mirror = find_bio_disk(r1_bio, bio); raid1_end_write_request()
424 * Set R1BIO_Uptodate in our master bio, so that we raid1_end_write_request()
431 * will wait for the 'master' bio. raid1_end_write_request()
437 to_put = bio; raid1_end_write_request()
464 * In behind mode, we ACK the master bio once the I/O raid1_end_write_request()
474 struct bio *mbio = r1_bio->master_bio; raid1_end_write_request()
781 struct bio *bio; flush_pending_writes() local
782 bio = bio_list_get(&conf->pending_bio_list); flush_pending_writes()
790 while (bio) { /* submit pending writes */ flush_pending_writes()
791 struct bio *next = bio->bi_next; flush_pending_writes()
792 bio->bi_next = NULL; flush_pending_writes()
793 if (unlikely((bio->bi_rw & REQ_DISCARD) && flush_pending_writes()
794 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) flush_pending_writes()
796 bio_endio(bio, 0); flush_pending_writes()
798 generic_make_request(bio); flush_pending_writes()
799 bio = next; flush_pending_writes()
870 static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) need_to_wait_for_sync() argument
874 if (conf->array_frozen || !bio) need_to_wait_for_sync()
876 else if (conf->barrier && bio_data_dir(bio) == WRITE) { need_to_wait_for_sync()
878 >= bio_end_sector(bio)) || need_to_wait_for_sync()
880 <= bio->bi_iter.bi_sector)) need_to_wait_for_sync()
889 static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) wait_barrier() argument
894 if (need_to_wait_for_sync(conf, bio)) { wait_barrier()
900 * per-process bio queue isn't empty, wait_barrier()
916 if (bio && bio_data_dir(bio) == WRITE) { wait_barrier()
917 if (bio->bi_iter.bi_sector >= wait_barrier()
925 <= bio->bi_iter.bi_sector) wait_barrier()
1002 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) alloc_behind_pages() argument
1006 struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec), alloc_behind_pages()
1011 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
1022 r1_bio->behind_page_count = bio->bi_vcnt;
1027 for (i = 0; i < bio->bi_vcnt; i++)
1032 bio->bi_iter.bi_size);
1047 struct bio *bio; raid1_unplug() local
1061 bio = bio_list_get(&plug->pending); raid1_unplug()
1065 while (bio) { /* submit pending writes */ raid1_unplug()
1066 struct bio *next = bio->bi_next; raid1_unplug()
1067 bio->bi_next = NULL; raid1_unplug()
1068 if (unlikely((bio->bi_rw & REQ_DISCARD) && raid1_unplug()
1069 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) raid1_unplug()
1071 bio_endio(bio, 0); raid1_unplug()
1073 generic_make_request(bio); raid1_unplug()
1074 bio = next; raid1_unplug()
1079 static void make_request(struct mddev *mddev, struct bio * bio) make_request() argument
1084 struct bio *read_bio; make_request()
1088 const int rw = bio_data_dir(bio); make_request()
1089 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); make_request()
1090 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); make_request()
1091 const unsigned long do_discard = (bio->bi_rw make_request()
1093 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); make_request()
1108 md_write_start(mddev, bio); /* wait on superblock update early */ make_request()
1110 if (bio_data_dir(bio) == WRITE && make_request()
1111 ((bio_end_sector(bio) > mddev->suspend_lo && make_request()
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || make_request()
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { make_request()
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || make_request()
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || make_request()
1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) make_request()
1135 start_next_window = wait_barrier(conf, bio); make_request()
1146 r1_bio->master_bio = bio; make_request()
1147 r1_bio->sectors = bio_sectors(bio); make_request()
1150 r1_bio->sector = bio->bi_iter.bi_sector; make_request()
1154 * track of the number of reads in bio->bi_phys_segments. make_request()
1159 bio->bi_phys_segments = 0; make_request()
1160 clear_bit(BIO_SEG_VALID, &bio->bi_flags); make_request()
1190 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); make_request()
1191 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, make_request()
1209 - bio->bi_iter.bi_sector); make_request()
1212 if (bio->bi_phys_segments == 0) make_request()
1213 bio->bi_phys_segments = 2; make_request()
1215 bio->bi_phys_segments++; make_request()
1220 * for it. So hand bio over to raid1d. make_request()
1226 r1_bio->master_bio = bio; make_request()
1227 r1_bio->sectors = bio_sectors(bio) - sectors_handled; make_request()
1230 r1_bio->sector = bio->bi_iter.bi_sector + make_request()
1248 * bios[x] to bio make_request()
1321 r1_bio->bios[i] = bio; make_request()
1334 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); make_request()
1336 start_next_window = wait_barrier(conf, bio); make_request()
1338 * We must make sure the multi r1bios of bio have make_request()
1341 if (bio->bi_phys_segments && old && make_request()
1345 bio->bi_phys_segments == 1); make_request()
1355 if (bio->bi_phys_segments == 0) make_request()
1356 bio->bi_phys_segments = 2; make_request()
1358 bio->bi_phys_segments++; make_request()
1361 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; make_request()
1368 struct bio *mbio; make_request()
1372 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); make_request()
1373 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); make_request()
1397 * We trimmed the bio, so _all is legit make_request()
1435 * as it could result in the bio being freed. make_request()
1437 if (sectors_handled < bio_sectors(bio)) { make_request()
1440 * in bio->bi_phys_segments make_request()
1443 r1_bio->master_bio = bio; make_request()
1444 r1_bio->sectors = bio_sectors(bio) - sectors_handled; make_request()
1447 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; make_request()
1739 static void end_sync_read(struct bio *bio, int error) end_sync_read() argument
1741 struct r1bio *r1_bio = bio->bi_private; end_sync_read()
1750 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) end_sync_read()
1757 static void end_sync_write(struct bio *bio, int error) end_sync_write() argument
1759 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_sync_write()
1760 struct r1bio *r1_bio = bio->bi_private; end_sync_write()
1767 mirror = find_bio_disk(r1_bio, bio); end_sync_write()
1844 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; fix_sync_read_error() local
1866 bio->bi_io_vec[idx].bv_page, fix_sync_read_error()
1888 bdevname(bio->bi_bdev, b), fix_sync_read_error()
1922 bio->bi_io_vec[idx].bv_page, fix_sync_read_error()
1937 bio->bi_io_vec[idx].bv_page, fix_sync_read_error()
1946 set_bit(BIO_UPTODATE, &bio->bi_flags); fix_sync_read_error()
1971 struct bio *b = r1_bio->bios[i]; process_checks()
1974 /* fixup the bio for reuse, but preserve BIO_UPTODATE */ process_checks()
2009 struct bio *pbio = r1_bio->bios[primary]; process_checks()
2010 struct bio *sbio = r1_bio->bios[i]; process_checks()
2049 struct bio *bio, *wbio; sync_request_write() local
2051 bio = r1_bio->bios[r1_bio->read_disk]; sync_request_write()
2194 /* bio has the data to be written to device 'i' where narrow_write_error()
2196 * We repeatedly clone the bio and trim down to one block, narrow_write_error()
2199 * It is conceivable that the bio doesn't exactly align with narrow_write_error()
2222 struct bio *wbio; narrow_write_error()
2271 struct bio *bio = r1_bio->bios[m]; handle_sync_write_finished() local
2272 if (bio->bi_end_io == NULL) handle_sync_write_finished()
2274 if (test_bit(BIO_UPTODATE, &bio->bi_flags) && handle_sync_write_finished()
2278 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && handle_sync_write_finished()
2322 struct bio *bio; handle_read_error() local
2344 bio = r1_bio->bios[r1_bio->read_disk]; handle_read_error()
2345 bdevname(bio->bi_bdev, b); handle_read_error()
2356 if (bio) { handle_read_error()
2359 bio_put(bio); handle_read_error()
2362 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); handle_read_error()
2363 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, handle_read_error()
2365 r1_bio->bios[r1_bio->read_disk] = bio; handle_read_error()
2373 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; handle_read_error()
2374 bio->bi_bdev = rdev->bdev; handle_read_error()
2375 bio->bi_end_io = raid1_end_read_request; handle_read_error()
2376 bio->bi_rw = READ | do_sync; handle_read_error()
2377 bio->bi_private = r1_bio; handle_read_error()
2380 struct bio *mbio = r1_bio->master_bio; handle_read_error()
2390 generic_make_request(bio); handle_read_error()
2391 bio = NULL; handle_read_error()
2405 generic_make_request(bio); handle_read_error()
2489 struct bio *bio; sync_request() local
2561 bio = r1_bio->bios[i]; sync_request()
2562 bio_reset(bio); sync_request()
2570 bio->bi_rw = WRITE; sync_request()
2571 bio->bi_end_io = end_sync_write; sync_request()
2597 bio->bi_rw = READ; sync_request()
2598 bio->bi_end_io = end_sync_read; sync_request()
2609 bio->bi_rw = WRITE; sync_request()
2610 bio->bi_end_io = end_sync_write; sync_request()
2614 if (bio->bi_end_io) { sync_request()
2616 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; sync_request()
2617 bio->bi_bdev = rdev->bdev; sync_request()
2618 bio->bi_private = r1_bio; sync_request()
2703 bio = r1_bio->bios[i]; sync_request()
2704 if (bio->bi_end_io) { sync_request()
2705 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; sync_request()
2706 if (bio_add_page(bio, page, len, 0) == 0) { sync_request()
2708 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; sync_request()
2711 bio = r1_bio->bios[i]; sync_request()
2712 if (bio->bi_end_io==NULL) sync_request()
2714 /* remove last page from this bio */ sync_request()
2715 bio->bi_vcnt--; sync_request()
2716 bio->bi_iter.bi_size -= len; sync_request()
2717 __clear_bit(BIO_SEG_VALID, &bio->bi_flags); sync_request()
2736 bio = r1_bio->bios[i]; sync_request()
2737 if (bio->bi_end_io == end_sync_read) { sync_request()
2739 md_sync_acct(bio->bi_bdev, nr_sectors); sync_request()
2740 generic_make_request(bio); sync_request()
2745 bio = r1_bio->bios[r1_bio->read_disk]; sync_request()
2746 md_sync_acct(bio->bi_bdev, nr_sectors); sync_request()
2747 generic_make_request(bio); sync_request()
dm-linear.c
11 #include <linux/bio.h>
82 static void linear_map_bio(struct dm_target *ti, struct bio *bio) linear_map_bio() argument
86 bio->bi_bdev = lc->dev->bdev; linear_map_bio()
87 if (bio_sectors(bio)) linear_map_bio()
88 bio->bi_iter.bi_sector = linear_map_bio()
89 linear_map_sector(ti, bio->bi_iter.bi_sector); linear_map_bio()
92 static int linear_map(struct dm_target *ti, struct bio *bio) linear_map() argument
94 linear_map_bio(ti, bio); linear_map()
multipath.c
77 struct bio *bio = mp_bh->master_bio; multipath_end_bh_io() local
80 bio_endio(bio, err); multipath_end_bh_io()
84 static void multipath_end_request(struct bio *bio, int error) multipath_end_request() argument
86 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); multipath_end_request()
87 struct multipath_bh *mp_bh = bio->bi_private; multipath_end_request()
93 else if (!(bio->bi_rw & REQ_RAHEAD)) { multipath_end_request()
101 (unsigned long long)bio->bi_iter.bi_sector); multipath_end_request()
108 static void multipath_make_request(struct mddev *mddev, struct bio * bio) multipath_make_request() argument
114 if (unlikely(bio->bi_rw & REQ_FLUSH)) { multipath_make_request()
115 md_flush_request(mddev, bio); multipath_make_request()
121 mp_bh->master_bio = bio; multipath_make_request()
126 bio_endio(bio, -EIO); multipath_make_request()
132 bio_init(&mp_bh->bio); multipath_make_request()
133 __bio_clone_fast(&mp_bh->bio, bio); multipath_make_request()
135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; multipath_make_request()
136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; multipath_make_request()
137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; multipath_make_request()
138 mp_bh->bio.bi_end_io = multipath_end_request; multipath_make_request()
139 mp_bh->bio.bi_private = mp_bh; multipath_make_request()
140 generic_make_request(&mp_bh->bio); multipath_make_request()
335 struct bio *bio; multipathd() local
350 bio = &mp_bh->bio; multipathd()
351 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; multipathd()
356 bdevname(bio->bi_bdev,b), multipathd()
357 (unsigned long long)bio->bi_iter.bi_sector); multipathd()
362 bdevname(bio->bi_bdev,b), multipathd()
363 (unsigned long long)bio->bi_iter.bi_sector); multipathd()
364 *bio = *(mp_bh->master_bio); multipathd()
365 bio->bi_iter.bi_sector += multipathd()
367 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; multipathd()
368 bio->bi_rw |= REQ_FAILFAST_TRANSPORT; multipathd()
369 bio->bi_end_io = multipath_end_request; multipathd()
370 bio->bi_private = mp_bh; multipathd()
371 generic_make_request(bio); multipathd()
raid10.c
80 * correct the read error. To keep track of bad blocks on a per-bio
83 #define IO_BLOCKED ((struct bio *)1)
86 * the success by setting devs[n].bio to IO_MADE_GOOD
88 #define IO_MADE_GOOD ((struct bio *)2)
90 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
104 static void end_reshape_write(struct bio *bio, int error);
142 struct bio *bio; r10buf_pool_alloc() local
160 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); r10buf_pool_alloc()
161 if (!bio) r10buf_pool_alloc()
163 r10_bio->devs[j].bio = bio; r10buf_pool_alloc()
166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); r10buf_pool_alloc()
167 if (!bio) r10buf_pool_alloc()
169 r10_bio->devs[j].repl_bio = bio; r10buf_pool_alloc()
176 struct bio *rbio = r10_bio->devs[j].repl_bio; r10buf_pool_alloc()
177 bio = r10_bio->devs[j].bio; r10buf_pool_alloc()
183 struct bio *rbio = r10_bio->devs[0].bio; r10buf_pool_alloc()
191 bio->bi_io_vec[i].bv_page = page; r10buf_pool_alloc()
201 safe_put_page(bio->bi_io_vec[i-1].bv_page); r10buf_pool_alloc()
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); r10buf_pool_alloc()
208 if (r10_bio->devs[j].bio) r10buf_pool_alloc()
209 bio_put(r10_bio->devs[j].bio); r10buf_pool_alloc()
225 struct bio *bio = r10bio->devs[j].bio; r10buf_pool_free() local
226 if (bio) { r10buf_pool_free()
228 safe_put_page(bio->bi_io_vec[i].bv_page); r10buf_pool_free()
229 bio->bi_io_vec[i].bv_page = NULL; r10buf_pool_free()
231 bio_put(bio); r10buf_pool_free()
233 bio = r10bio->devs[j].repl_bio; r10buf_pool_free()
234 if (bio) r10buf_pool_free()
235 bio_put(bio); r10buf_pool_free()
245 struct bio **bio = & r10_bio->devs[i].bio; put_all_bios() local
246 if (!BIO_SPECIAL(*bio)) put_all_bios()
247 bio_put(*bio); put_all_bios()
248 *bio = NULL; put_all_bios()
249 bio = &r10_bio->devs[i].repl_bio; put_all_bios()
250 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) put_all_bios()
251 bio_put(*bio); put_all_bios()
252 *bio = NULL; put_all_bios()
297 struct bio *bio = r10_bio->master_bio; raid_end_bio_io() local
301 if (bio->bi_phys_segments) { raid_end_bio_io()
304 bio->bi_phys_segments--; raid_end_bio_io()
305 done = (bio->bi_phys_segments == 0); raid_end_bio_io()
310 clear_bit(BIO_UPTODATE, &bio->bi_flags); raid_end_bio_io()
312 bio_endio(bio, 0); raid_end_bio_io()
334 * Find the disk number which triggered given bio
337 struct bio *bio, int *slotp, int *replp) find_bio_disk()
343 if (r10_bio->devs[slot].bio == bio) find_bio_disk()
345 if (r10_bio->devs[slot].repl_bio == bio) { find_bio_disk()
361 static void raid10_end_read_request(struct bio *bio, int error) raid10_end_read_request() argument
363 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); raid10_end_read_request()
364 struct r10bio *r10_bio = bio->bi_private; raid10_end_read_request()
379 * Set R10BIO_Uptodate in our master bio, so that raid10_end_read_request()
385 * wait for the 'master' bio. raid10_end_read_request()
441 static void raid10_end_write_request(struct bio *bio, int error) raid10_end_write_request() argument
443 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); raid10_end_write_request()
444 struct r10bio *r10_bio = bio->bi_private; raid10_end_write_request()
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); raid10_end_write_request()
479 * Set R10BIO_Uptodate in our master bio, so that raid10_end_write_request()
485 * wait for the 'master' bio. raid10_end_write_request()
507 bio_put(bio); raid10_end_write_request()
511 r10_bio->devs[slot].bio = IO_MADE_GOOD; raid10_end_write_request()
676 * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
678 * @bvm: properties of new bio
819 if (r10_bio->devs[slot].bio == IO_BLOCKED) read_balance()
945 struct bio *bio; flush_pending_writes() local
946 bio = bio_list_get(&conf->pending_bio_list); flush_pending_writes()
954 while (bio) { /* submit pending writes */ flush_pending_writes()
955 struct bio *next = bio->bi_next; flush_pending_writes()
956 bio->bi_next = NULL; flush_pending_writes()
957 if (unlikely((bio->bi_rw & REQ_DISCARD) && flush_pending_writes()
958 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) flush_pending_writes()
960 bio_endio(bio, 0); flush_pending_writes()
962 generic_make_request(bio); flush_pending_writes()
963 bio = next; flush_pending_writes()
1029 * pre-process bio queue isn't empty, wait_barrier()
1112 struct bio *bio; raid10_unplug() local
1126 bio = bio_list_get(&plug->pending); raid10_unplug()
1130 while (bio) { /* submit pending writes */ raid10_unplug()
1131 struct bio *next = bio->bi_next; raid10_unplug()
1132 bio->bi_next = NULL; raid10_unplug()
1133 if (unlikely((bio->bi_rw & REQ_DISCARD) && raid10_unplug()
1134 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) raid10_unplug()
1136 bio_endio(bio, 0); raid10_unplug()
1138 generic_make_request(bio); raid10_unplug()
1139 bio = next; raid10_unplug()
1144 static void __make_request(struct mddev *mddev, struct bio *bio) __make_request() argument
1148 struct bio *read_bio; __make_request()
1150 const int rw = bio_data_dir(bio); __make_request()
1151 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); __make_request()
1152 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); __make_request()
1153 const unsigned long do_discard = (bio->bi_rw __make_request()
1155 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); __make_request()
1171 sectors = bio_sectors(bio); __make_request()
1173 bio->bi_iter.bi_sector < conf->reshape_progress && __make_request()
1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { __make_request()
1180 conf->reshape_progress <= bio->bi_iter.bi_sector || __make_request()
1181 conf->reshape_progress >= bio->bi_iter.bi_sector + __make_request()
1186 bio_data_dir(bio) == WRITE && __make_request()
1188 ? (bio->bi_iter.bi_sector < conf->reshape_safe && __make_request()
1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) __make_request()
1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && __make_request()
1191 bio->bi_iter.bi_sector < conf->reshape_progress))) { __make_request()
1205 r10_bio->master_bio = bio; __make_request()
1209 r10_bio->sector = bio->bi_iter.bi_sector; __make_request()
1214 * track of the number of reads in bio->bi_phys_segments. __make_request()
1219 bio->bi_phys_segments = 0; __make_request()
1220 clear_bit(BIO_SEG_VALID, &bio->bi_flags); __make_request()
1237 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); __make_request()
1238 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, __make_request()
1241 r10_bio->devs[slot].bio = read_bio; __make_request()
1256 - bio->bi_iter.bi_sector); __make_request()
1259 if (bio->bi_phys_segments == 0) __make_request()
1260 bio->bi_phys_segments = 2; __make_request()
1262 bio->bi_phys_segments++; __make_request()
1267 * waiting for it. so hand bio over to raid10d. __make_request()
1273 r10_bio->master_bio = bio; __make_request()
1274 r10_bio->sectors = bio_sectors(bio) - sectors_handled; __make_request()
1277 r10_bio->sector = bio->bi_iter.bi_sector + __make_request()
1295 * bios[x] to bio __make_request()
1301 * of r10_bios is recored in bio->bi_phys_segments just as with __make_request()
1336 r10_bio->devs[i].bio = NULL; __make_request()
1386 r10_bio->devs[i].bio = bio; __make_request()
1390 r10_bio->devs[i].repl_bio = bio; __make_request()
1402 if (r10_bio->devs[j].bio) { __make_request()
1430 if (bio->bi_phys_segments == 0) __make_request()
1431 bio->bi_phys_segments = 2; __make_request()
1433 bio->bi_phys_segments++; __make_request()
1437 bio->bi_iter.bi_sector; __make_request()
1443 struct bio *mbio; __make_request()
1445 if (r10_bio->devs[i].bio) { __make_request()
1447 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); __make_request()
1448 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, __make_request()
1450 r10_bio->devs[i].bio = mbio; __make_request()
1490 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); __make_request()
1491 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, __make_request()
1518 if (sectors_handled < bio_sectors(bio)) { __make_request()
1521 * in bio->bi_phys_segments. __make_request()
1525 r10_bio->master_bio = bio; __make_request()
1526 r10_bio->sectors = bio_sectors(bio) - sectors_handled; __make_request()
1529 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; __make_request()
1536 static void make_request(struct mddev *mddev, struct bio *bio) make_request() argument
1542 struct bio *split; make_request()
1544 if (unlikely(bio->bi_rw & REQ_FLUSH)) { make_request()
1545 md_flush_request(mddev, bio); make_request()
1549 md_write_start(mddev, bio); make_request()
1557 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + make_request()
1558 bio_sectors(bio) > chunk_sects make_request()
1562 split = bio_split(bio, chunk_sects - make_request()
1563 (bio->bi_iter.bi_sector & make_request()
1566 bio_chain(split, bio); make_request()
1568 split = bio; make_request()
1572 } while (split != bio); make_request()
1919 static void end_sync_read(struct bio *bio, int error) end_sync_read() argument
1921 struct r10bio *r10_bio = bio->bi_private; end_sync_read()
1925 if (bio == r10_bio->master_bio) { end_sync_read()
1929 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); end_sync_read()
1931 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) end_sync_read()
1980 static void end_sync_write(struct bio *bio, int error) end_sync_write() argument
1982 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_sync_write()
1983 struct r10bio *r10_bio = bio->bi_private; end_sync_write()
1993 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); end_sync_write()
2040 struct bio *tbio, *fbio; sync_request_write()
2047 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) sync_request_write()
2054 fbio = r10_bio->devs[i].bio; sync_request_write()
2061 tbio = r10_bio->devs[i].bio; sync_request_write()
2067 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { sync_request_write()
2090 /* Ok, we need to write this bio, either to correct an sync_request_write()
2132 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write sync_request_write()
2133 && r10_bio->devs[i].bio != fbio) sync_request_write()
2173 struct bio *bio = r10_bio->devs[0].bio; fix_recovery_read_error() local
2194 bio->bi_io_vec[idx].bv_page, fix_recovery_read_error()
2202 bio->bi_io_vec[idx].bv_page, fix_recovery_read_error()
2250 struct bio *wbio, *wbio2; recovery_request_write()
2259 * share the pages with the first bio recovery_request_write()
2263 wbio = r10_bio->devs[1].bio; recovery_request_write()
2386 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; fix_read_error()
2443 r10_bio->devs[r10_bio->read_slot].bio fix_read_error()
2551 struct bio *bio = r10_bio->master_bio; narrow_write_error() local
2555 /* bio has the data to be written to slot 'i' where narrow_write_error()
2557 * We repeatedly clone the bio and trim down to one block, narrow_write_error()
2560 * It is conceivable that the bio doesn't exactly align with narrow_write_error()
2583 struct bio *wbio; narrow_write_error()
2587 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); narrow_write_error()
2588 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); narrow_write_error()
2610 struct bio *bio; handle_read_error() local
2625 bio = r10_bio->devs[slot].bio; handle_read_error()
2626 bdevname(bio->bi_bdev, b); handle_read_error()
2627 bio_put(bio); handle_read_error()
2628 r10_bio->devs[slot].bio = NULL; handle_read_error()
2635 r10_bio->devs[slot].bio = IO_BLOCKED; handle_read_error()
2659 bio = bio_clone_mddev(r10_bio->master_bio, handle_read_error()
2661 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); handle_read_error()
2662 r10_bio->devs[slot].bio = bio; handle_read_error()
2664 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr handle_read_error()
2666 bio->bi_bdev = rdev->bdev; handle_read_error()
2667 bio->bi_rw = READ | do_sync; handle_read_error()
2668 bio->bi_private = r10_bio; handle_read_error()
2669 bio->bi_end_io = raid10_end_read_request; handle_read_error()
2672 struct bio *mbio = r10_bio->master_bio; handle_read_error()
2683 generic_make_request(bio); handle_read_error()
2698 generic_make_request(bio); handle_read_error()
2717 if (r10_bio->devs[m].bio == NULL) handle_write_completed()
2720 &r10_bio->devs[m].bio->bi_flags)) { handle_write_completed()
2753 struct bio *bio = r10_bio->devs[m].bio; handle_write_completed() local
2755 if (bio == IO_MADE_GOOD) { handle_write_completed()
2761 } else if (bio != NULL && handle_write_completed()
2762 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { handle_write_completed()
2770 bio = r10_bio->devs[m].repl_bio; handle_write_completed()
2772 if (rdev && bio == IO_MADE_GOOD) { handle_write_completed()
2831 generic_make_request(r10_bio->devs[slot].bio); raid10d()
2880 * As we setup these structures, we collect all bio's together into a list
2896 struct bio *biolist = NULL, *bio; sync_request() local
3068 r10_bio->master_bio = (struct bio*)rb2; sync_request()
3118 bio = r10_bio->devs[0].bio; sync_request()
3119 bio_reset(bio); sync_request()
3120 bio->bi_next = biolist; sync_request()
3121 biolist = bio; sync_request()
3122 bio->bi_private = r10_bio; sync_request()
3123 bio->bi_end_io = end_sync_read; sync_request()
3124 bio->bi_rw = READ; sync_request()
3126 bio->bi_iter.bi_sector = from_addr + sync_request()
3128 bio->bi_bdev = rdev->bdev; sync_request()
3144 bio = r10_bio->devs[1].bio; sync_request()
3145 bio_reset(bio); sync_request()
3146 bio->bi_next = biolist; sync_request()
3147 biolist = bio; sync_request()
3148 bio->bi_private = r10_bio; sync_request()
3149 bio->bi_end_io = end_sync_write; sync_request()
3150 bio->bi_rw = WRITE; sync_request()
3151 bio->bi_iter.bi_sector = to_addr sync_request()
3153 bio->bi_bdev = rdev->bdev; sync_request()
3156 r10_bio->devs[1].bio->bi_end_io = NULL; sync_request()
3159 bio = r10_bio->devs[1].repl_bio; sync_request()
3160 if (bio) sync_request()
3161 bio->bi_end_io = NULL; sync_request()
3163 /* Note: if rdev != NULL, then bio sync_request()
3171 if (rdev == NULL || bio == NULL || sync_request()
3174 bio_reset(bio); sync_request()
3175 bio->bi_next = biolist; sync_request()
3176 biolist = bio; sync_request()
3177 bio->bi_private = r10_bio; sync_request()
3178 bio->bi_end_io = end_sync_write; sync_request()
3179 bio->bi_rw = WRITE; sync_request()
3180 bio->bi_iter.bi_sector = to_addr + sync_request()
3182 bio->bi_bdev = rdev->bdev; sync_request()
3274 bio = r10_bio->devs[i].bio; sync_request()
3275 bio_reset(bio); sync_request()
3276 clear_bit(BIO_UPTODATE, &bio->bi_flags); sync_request()
3295 bio->bi_next = biolist; sync_request()
3296 biolist = bio; sync_request()
3297 bio->bi_private = r10_bio; sync_request()
3298 bio->bi_end_io = end_sync_read; sync_request()
3299 bio->bi_rw = READ; sync_request()
3300 bio->bi_iter.bi_sector = sector + sync_request()
3302 bio->bi_bdev = conf->mirrors[d].rdev->bdev; sync_request()
3311 bio = r10_bio->devs[i].repl_bio; sync_request()
3312 bio_reset(bio); sync_request()
3313 clear_bit(BIO_UPTODATE, &bio->bi_flags); sync_request()
3317 bio->bi_next = biolist; sync_request()
3318 biolist = bio; sync_request()
3319 bio->bi_private = r10_bio; sync_request()
3320 bio->bi_end_io = end_sync_write; sync_request()
3321 bio->bi_rw = WRITE; sync_request()
3322 bio->bi_iter.bi_sector = sector + sync_request()
3324 bio->bi_bdev = conf->mirrors[d].replacement->bdev; sync_request()
3331 if (r10_bio->devs[i].bio->bi_end_io) sync_request()
3356 for (bio= biolist ; bio ; bio=bio->bi_next) { sync_request()
3357 struct bio *bio2; sync_request()
3358 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; sync_request()
3359 if (bio_add_page(bio, page, len, 0)) sync_request()
3363 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; sync_request()
3365 bio2 && bio2 != bio; sync_request()
3367 /* remove last page from this bio */ sync_request()
3381 bio = biolist; sync_request()
3384 bio->bi_next = NULL; sync_request()
3385 r10_bio = bio->bi_private; sync_request()
3388 if (bio->bi_end_io == end_sync_read) { sync_request()
3389 md_sync_acct(bio->bi_bdev, nr_sectors); sync_request()
3390 set_bit(BIO_UPTODATE, &bio->bi_flags); sync_request()
3391 generic_make_request(bio); sync_request()
4258 * We store the read-in bio in ->master_bio and the others in reshape_request()
4259 * ->devs[x].bio and ->devs[x].repl_bio. reshape_request()
4269 struct bio *blist; reshape_request()
4270 struct bio *bio, *read_bio; reshape_request() local
4408 struct bio *b; reshape_request()
4416 b = r10_bio->devs[s/2].bio; reshape_request()
4436 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; reshape_request()
4440 for (bio = blist; bio ; bio = bio->bi_next) { reshape_request()
4441 struct bio *bio2; reshape_request()
4442 if (bio_add_page(bio, page, len, 0)) reshape_request()
4447 bio2 && bio2 != bio; reshape_request()
4449 /* Remove last page from this bio */ reshape_request()
4508 struct bio *b; reshape_request_write()
4516 b = r10_bio->devs[s/2].bio; reshape_request_write()
4617 static void end_reshape_write(struct bio *bio, int error) end_reshape_write() argument
4619 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_reshape_write()
4620 struct r10bio *r10_bio = bio->bi_private; end_reshape_write()
4628 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); end_reshape_write()
336 find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, struct bio *bio, int *slotp, int *replp) find_bio_disk() argument
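
The handle_read_error() fragments above clone the original master bio, trim the clone down to the range being retried, and point it at another mirror. Below is a minimal user-space sketch of the trim arithmetic only; struct mini_bio and mini_bio_trim() are illustrative stand-ins for the kernel's struct bio and bio_trim(), not the real thing.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for the few bio fields the trim touches. */
    struct mini_bio {
        uint64_t bi_sector;   /* first sector of the I/O    */
        uint32_t bi_size;     /* remaining bytes in the I/O */
    };

    /*
     * Keep only 'sectors' sectors of 'b', starting 'offset' sectors into it --
     * the same arithmetic the trim applies to the clone before it is reissued.
     */
    static void mini_bio_trim(struct mini_bio *b, uint64_t offset, uint32_t sectors)
    {
        b->bi_sector += offset;
        b->bi_size = sectors << 9;          /* sectors are 512 bytes */
    }

    int main(void)
    {
        struct mini_bio clone = { 1000, 64u << 9 };

        /* retry only sectors 1016..1023 of the original request */
        mini_bio_trim(&clone, 1016 - clone.bi_sector, 8);
        printf("retry at sector %llu, %u bytes\n",
               (unsigned long long)clone.bi_sector, clone.bi_size);
        return 0;
    }
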
H A Ddm-cache-target.c8 #include "dm-bio-prison.h"
9 #include "dm-bio-record.h"
66 * There are a couple of places where we let a bio run, but want to do some
75 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, dm_hook_bio() argument
78 h->bi_end_io = bio->bi_end_io; dm_hook_bio()
79 h->bi_private = bio->bi_private; dm_hook_bio()
81 bio->bi_end_io = bi_end_io; dm_hook_bio()
82 bio->bi_private = bi_private; dm_hook_bio()
85 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) dm_unhook_bio() argument
87 bio->bi_end_io = h->bi_end_io; dm_unhook_bio()
88 bio->bi_private = h->bi_private; dm_unhook_bio()
91 * Must bump bi_remaining to allow bio to complete with dm_unhook_bio()
94 atomic_inc(&bio->bi_remaining); dm_unhook_bio()
331 * Processing a bio in the worker thread may require these memory
480 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, bio_detain_range()
488 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); bio_detain_range()
496 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, bio_detain()
501 return bio_detain_range(cache, oblock, end, bio, bio_detain()
661 * Per bio data
690 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) get_per_bio_data() argument
692 struct per_bio_data *pb = dm_per_bio_data(bio, data_size); get_per_bio_data()
697 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) init_per_bio_data() argument
699 struct per_bio_data *pb = get_per_bio_data(bio, data_size); init_per_bio_data()
702 pb->req_nr = dm_bio_get_target_bio_nr(bio); init_per_bio_data()
711 static void remap_to_origin(struct cache *cache, struct bio *bio) remap_to_origin() argument
713 bio->bi_bdev = cache->origin_dev->bdev; remap_to_origin()
716 static void remap_to_cache(struct cache *cache, struct bio *bio, remap_to_cache() argument
719 sector_t bi_sector = bio->bi_iter.bi_sector; remap_to_cache()
722 bio->bi_bdev = cache->cache_dev->bdev; remap_to_cache()
724 bio->bi_iter.bi_sector = remap_to_cache()
728 bio->bi_iter.bi_sector = remap_to_cache()
733 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) check_if_tick_bio_needed() argument
737 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); check_if_tick_bio_needed()
741 !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { check_if_tick_bio_needed()
748 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, remap_to_origin_clear_discard() argument
751 check_if_tick_bio_needed(cache, bio); remap_to_origin_clear_discard()
752 remap_to_origin(cache, bio); remap_to_origin_clear_discard()
753 if (bio_data_dir(bio) == WRITE) remap_to_origin_clear_discard()
757 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, remap_to_cache_dirty() argument
760 check_if_tick_bio_needed(cache, bio); remap_to_cache_dirty()
761 remap_to_cache(cache, bio, cblock); remap_to_cache_dirty()
762 if (bio_data_dir(bio) == WRITE) { remap_to_cache_dirty()
768 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) get_bio_block() argument
770 sector_t block_nr = bio->bi_iter.bi_sector; get_bio_block()
780 static int bio_triggers_commit(struct cache *cache, struct bio *bio) bio_triggers_commit() argument
782 return bio->bi_rw & (REQ_FLUSH | REQ_FUA); bio_triggers_commit()
789 static void inc_ds(struct cache *cache, struct bio *bio, inc_ds() argument
793 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); inc_ds()
801 static void issue(struct cache *cache, struct bio *bio) issue() argument
805 if (!bio_triggers_commit(cache, bio)) { issue()
806 generic_make_request(bio); issue()
816 bio_list_add(&cache->deferred_flush_bios, bio); issue()
820 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) inc_and_issue() argument
822 inc_ds(cache, bio, cell); inc_and_issue()
823 issue(cache, bio); inc_and_issue()
826 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) defer_writethrough_bio() argument
831 bio_list_add(&cache->deferred_writethrough_bios, bio); defer_writethrough_bio()
837 static void writethrough_endio(struct bio *bio, int err) writethrough_endio() argument
839 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); writethrough_endio()
841 dm_unhook_bio(&pb->hook_info, bio); writethrough_endio()
844 bio_endio(bio, err); writethrough_endio()
848 dm_bio_restore(&pb->bio_details, bio); writethrough_endio()
849 remap_to_cache(pb->cache, bio, pb->cblock); writethrough_endio()
852 * We can't issue this bio directly, since we're in interrupt writethrough_endio()
853 * context. So it gets put on a bio list for processing by the writethrough_endio()
856 defer_writethrough_bio(pb->cache, bio); writethrough_endio()
862 * bio and send them in parallel, but for now we're doing them in
865 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, remap_to_origin_then_cache() argument
868 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); remap_to_origin_then_cache()
872 dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL); remap_to_origin_then_cache()
873 dm_bio_record(&pb->bio_details, bio); remap_to_origin_then_cache()
875 remap_to_origin_clear_discard(pb->cache, bio, oblock); remap_to_origin_then_cache()
1068 static void overwrite_endio(struct bio *bio, int err) overwrite_endio() argument
1070 struct dm_cache_migration *mg = bio->bi_private; overwrite_endio()
1073 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); overwrite_endio()
1076 dm_unhook_bio(&pb->hook_info, bio); overwrite_endio()
1090 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) issue_overwrite() argument
1093 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); issue_overwrite()
1095 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); issue_overwrite()
1096 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); issue_overwrite()
1102 generic_make_request(bio); issue_overwrite()
1105 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) bio_writes_complete_block() argument
1107 return (bio_data_dir(bio) == WRITE) && bio_writes_complete_block()
1108 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); bio_writes_complete_block()
1117 static void calc_discard_block_range(struct cache *cache, struct bio *bio, calc_discard_block_range() argument
1120 sector_t sb = bio->bi_iter.bi_sector; calc_discard_block_range()
1121 sector_t se = bio_end_sector(bio); calc_discard_block_range()
1134 struct bio *bio = mg->new_ocell->holder; issue_discard() local
1136 calc_discard_block_range(mg->cache, bio, &b, &e); issue_discard()
1142 bio_endio(bio, 0); issue_discard()
1161 struct bio *bio = mg->new_ocell->holder; issue_copy_or_discard() local
1166 !avoid && bio_writes_complete_block(cache, bio)) { issue_copy_or_discard()
1167 issue_overwrite(mg, bio); issue_copy_or_discard()
1374 * bio processing
1376 static void defer_bio(struct cache *cache, struct bio *bio) defer_bio() argument
1381 bio_list_add(&cache->deferred_bios, bio); defer_bio()
1387 static void process_flush_bio(struct cache *cache, struct bio *bio) process_flush_bio() argument
1390 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); process_flush_bio()
1392 BUG_ON(bio->bi_iter.bi_size); process_flush_bio()
1394 remap_to_origin(cache, bio); process_flush_bio()
1396 remap_to_cache(cache, bio, 0); process_flush_bio()
1403 issue(cache, bio); process_flush_bio()
1407 struct bio *bio) process_discard_bio()
1413 calc_discard_block_range(cache, bio, &b, &e); process_discard_bio()
1415 bio_endio(bio, 0); process_discard_bio()
1420 r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc, process_discard_bio()
1436 static void inc_hit_counter(struct cache *cache, struct bio *bio) inc_hit_counter() argument
1438 atomic_inc(bio_data_dir(bio) == READ ? inc_hit_counter()
1442 static void inc_miss_counter(struct cache *cache, struct bio *bio) inc_miss_counter() argument
1444 atomic_inc(bio_data_dir(bio) == READ ? inc_miss_counter()
1475 struct bio *bio) process_bio()
1479 dm_oblock_t block = get_bio_block(cache, bio); process_bio()
1490 r = bio_detain(cache, block, bio, cell_prealloc, process_bio()
1504 bio, &ool.locker, &lookup_result); process_bio()
1513 inc_miss_counter(cache, bio); process_bio()
1521 if (bio_data_dir(bio) == WRITE) { process_bio()
1528 remap_to_origin_clear_discard(cache, bio, block); process_bio()
1529 inc_and_issue(cache, bio, new_ocell); process_bio()
1532 inc_hit_counter(cache, bio); process_bio()
1534 if (bio_data_dir(bio) == WRITE && process_bio()
1537 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); process_bio()
1538 inc_and_issue(cache, bio, new_ocell); process_bio()
1541 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); process_bio()
1542 inc_and_issue(cache, bio, new_ocell); process_bio()
1549 inc_miss_counter(cache, bio); process_bio()
1550 remap_to_origin_clear_discard(cache, bio, block); process_bio()
1551 inc_and_issue(cache, bio, new_ocell); process_bio()
1570 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, process_bio()
1572 bio_io_error(bio); process_bio()
1604 struct bio *bio; process_deferred_bios() local
1618 * this bio might require one, we pause until there are some process_deferred_bios()
1628 bio = bio_list_pop(&bios); process_deferred_bios()
1630 if (bio->bi_rw & REQ_FLUSH) process_deferred_bios()
1631 process_flush_bio(cache, bio); process_deferred_bios()
1632 else if (bio->bi_rw & REQ_DISCARD) process_deferred_bios()
1633 process_discard_bio(cache, &structs, bio); process_deferred_bios()
1635 process_bio(cache, &structs, bio); process_deferred_bios()
1645 struct bio *bio; process_deferred_flush_bios() local
1657 while ((bio = bio_list_pop(&bios))) process_deferred_flush_bios()
1658 submit_bios ? generic_make_request(bio) : bio_io_error(bio); process_deferred_flush_bios()
1665 struct bio *bio; process_deferred_writethrough_bios() local
1677 while ((bio = bio_list_pop(&bios))) process_deferred_writethrough_bios()
1678 generic_make_request(bio); process_deferred_writethrough_bios()
1809 struct bio *bio; requeue_deferred_io() local
1816 while ((bio = bio_list_pop(&bios))) requeue_deferred_io()
1817 bio_endio(bio, DM_ENDIO_REQUEUE); requeue_deferred_io()
2502 *error = "could not create bio prison"; cache_create()
2604 static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) __cache_map() argument
2607 dm_oblock_t block = get_bio_block(cache, bio); __cache_map()
2612 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); __cache_map()
2623 remap_to_origin(cache, bio); __cache_map()
2627 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { __cache_map()
2628 defer_bio(cache, bio); __cache_map()
2637 defer_bio(cache, bio); __cache_map()
2641 r = bio_detain(cache, block, bio, *cell, __cache_map()
2646 defer_bio(cache, bio); __cache_map()
2654 bio, &ool.locker, &lookup_result); __cache_map()
2662 bio_io_error(bio); __cache_map()
2670 if (bio_data_dir(bio) == WRITE) { __cache_map()
2679 inc_miss_counter(cache, bio); __cache_map()
2680 remap_to_origin_clear_discard(cache, bio, block); __cache_map()
2684 inc_hit_counter(cache, bio); __cache_map()
2685 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && __cache_map()
2687 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); __cache_map()
2689 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); __cache_map()
2694 inc_miss_counter(cache, bio); __cache_map()
2700 bio_endio(bio, 0); __cache_map()
2705 remap_to_origin_clear_discard(cache, bio, block); __cache_map()
2710 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, __cache_map()
2713 bio_io_error(bio); __cache_map()
2720 static int cache_map(struct dm_target *ti, struct bio *bio) cache_map() argument
2726 r = __cache_map(cache, bio, &cell); cache_map()
2728 inc_ds(cache, bio, cell); cache_map()
2735 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) cache_end_io() argument
2740 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); cache_end_io()
3314 * and could always be out of date by the time the bio is submitted.)
479 bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end, struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, cell_free_fn free_fn, void *free_context, struct dm_bio_prison_cell **cell_result) bio_detain_range() argument
495 bio_detain(struct cache *cache, dm_oblock_t oblock, struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, cell_free_fn free_fn, void *free_context, struct dm_bio_prison_cell **cell_result) bio_detain() argument
1406 process_discard_bio(struct cache *cache, struct prealloc *structs, struct bio *bio) process_discard_bio() argument
1474 process_bio(struct cache *cache, struct prealloc *structs, struct bio *bio) process_bio() argument
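
dm_hook_bio() and dm_unhook_bio() above stash a bio's completion callback and private pointer, substitute their own, and restore the originals before completion (see writethrough_endio()). The self-contained sketch below shows that save/replace/restore pattern with plain function pointers; fake_bio, hook_info and the function names are illustrative, not the dm structures.

    #include <stdio.h>

    typedef void (*end_io_fn)(void *private, int error);

    struct hook_info {                 /* saved originals */
        end_io_fn  bi_end_io;
        void      *bi_private;
    };

    struct fake_bio {                  /* only the fields the hook manipulates */
        end_io_fn  bi_end_io;
        void      *bi_private;
    };

    static void hook_bio(struct hook_info *h, struct fake_bio *bio,
                         end_io_fn end_io, void *private)
    {
        h->bi_end_io    = bio->bi_end_io;   /* remember the caller's completion */
        h->bi_private   = bio->bi_private;
        bio->bi_end_io  = end_io;           /* interpose our own */
        bio->bi_private = private;
    }

    static void unhook_bio(struct hook_info *h, struct fake_bio *bio)
    {
        bio->bi_end_io  = h->bi_end_io;     /* restore before completing */
        bio->bi_private = h->bi_private;
    }

    static void original_end_io(void *private, int error)
    {
        printf("original completion: %s, error=%d\n", (char *)private, error);
    }

    int main(void)
    {
        struct fake_bio bio = { original_end_io, "user data" };
        struct hook_info h;

        hook_bio(&h, &bio, NULL, NULL);     /* interposed handler elided here */
        /* ...interposed work would run when the I/O completes... */
        unhook_bio(&h, &bio);
        bio.bi_end_io(bio.bi_private, 0);   /* complete as the caller expected */
        return 0;
    }
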
H A Ddm-stripe.c13 #include <linux/bio.h>
259 static int stripe_map_range(struct stripe_c *sc, struct bio *bio, stripe_map_range() argument
264 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, stripe_map_range()
266 stripe_map_range_sector(sc, bio_end_sector(bio), stripe_map_range()
269 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; stripe_map_range()
270 bio->bi_iter.bi_sector = begin + stripe_map_range()
272 bio->bi_iter.bi_size = to_bytes(end - begin); stripe_map_range()
276 bio_endio(bio, 0); stripe_map_range()
281 static int stripe_map(struct dm_target *ti, struct bio *bio) stripe_map() argument
287 if (bio->bi_rw & REQ_FLUSH) { stripe_map()
288 target_bio_nr = dm_bio_get_target_bio_nr(bio); stripe_map()
290 bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; stripe_map()
293 if (unlikely(bio->bi_rw & REQ_DISCARD) || stripe_map()
294 unlikely(bio->bi_rw & REQ_WRITE_SAME)) { stripe_map()
295 target_bio_nr = dm_bio_get_target_bio_nr(bio); stripe_map()
297 return stripe_map_range(sc, bio, target_bio_nr); stripe_map()
300 stripe_map_sector(sc, bio->bi_iter.bi_sector, stripe_map()
301 &stripe, &bio->bi_iter.bi_sector); stripe_map()
303 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; stripe_map()
304 bio->bi_bdev = sc->stripe[stripe].dev->bdev; stripe_map()
352 static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) stripe_end_io() argument
361 if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) stripe_end_io()
369 MAJOR(disk_devt(bio->bi_bdev->bd_disk)), stripe_end_io()
370 MINOR(disk_devt(bio->bi_bdev->bd_disk))); stripe_end_io()
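
stripe_map() above picks a backing device and rewrites the bio's sector. The sketch below shows the usual chunk-interleave arithmetic for a power-of-two chunk size; it is an illustration of the idea, not dm-stripe's stripe_map_sector(), which also handles non-power-of-two chunks.

    #include <stdio.h>
    #include <stdint.h>

    /* Map a logical sector to (stripe index, sector within that stripe). */
    static void map_sector(uint64_t sector, unsigned chunk_sectors,
                           unsigned nr_stripes,
                           unsigned *stripe, uint64_t *dev_sector)
    {
        uint64_t chunk  = sector / chunk_sectors;   /* which chunk overall   */
        unsigned offset = sector % chunk_sectors;   /* offset inside chunk   */

        *stripe     = chunk % nr_stripes;           /* device the chunk hits */
        *dev_sector = (chunk / nr_stripes) * chunk_sectors + offset;
    }

    int main(void)
    {
        unsigned stripe;
        uint64_t dev_sector;

        map_sector(1000, 128, 4, &stripe, &dev_sector);
        printf("sector 1000 -> stripe %u, device sector %llu\n",
               stripe, (unsigned long long)dev_sector);
        return 0;
    }
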
H A Dlinear.c56 * linear_mergeable_bvec -- tell bio layer if two requests can be merged
58 * @bvm: properties of new bio
259 static void linear_make_request(struct mddev *mddev, struct bio *bio) linear_make_request() argument
263 struct bio *split; linear_make_request()
266 if (unlikely(bio->bi_rw & REQ_FLUSH)) { linear_make_request()
267 md_flush_request(mddev, bio); linear_make_request()
272 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); linear_make_request()
276 bio->bi_bdev = tmp_dev->rdev->bdev; linear_make_request()
278 if (unlikely(bio->bi_iter.bi_sector >= end_sector || linear_make_request()
279 bio->bi_iter.bi_sector < start_sector)) linear_make_request()
282 if (unlikely(bio_end_sector(bio) > end_sector)) { linear_make_request()
283 /* This bio crosses a device boundary, so we have to linear_make_request()
286 split = bio_split(bio, end_sector - linear_make_request()
287 bio->bi_iter.bi_sector, linear_make_request()
289 bio_chain(split, bio); linear_make_request()
291 split = bio; linear_make_request()
303 } while (split != bio); linear_make_request()
311 (unsigned long long)bio->bi_iter.bi_sector, linear_make_request()
315 bio_io_error(bio); linear_make_request()
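
linear_make_request() above splits a bio that crosses the boundary between two concatenated devices and chains the remainder. The sketch below covers only the split-point arithmetic (how many sectors fit on the current device, which is the count passed to bio_split()); it is illustrative, not the md code.

    #include <stdio.h>
    #include <stdint.h>

    /* Sectors of an I/O at 'sector' that still fit before 'end_sector'. */
    static uint64_t sectors_on_this_dev(uint64_t sector, uint64_t nr_sectors,
                                        uint64_t end_sector)
    {
        if (sector + nr_sectors <= end_sector)
            return nr_sectors;          /* no split needed */
        return end_sector - sector;     /* first piece; the rest is chained */
    }

    int main(void)
    {
        /* device ends at sector 2048; a 64-sector write starts at 2040 */
        printf("first piece: %llu sectors\n",
               (unsigned long long)sectors_on_this_dev(2040, 64, 2048));
        return 0;
    }
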
H A Ddm-io.c12 #include <linux/bio.h>
84 * We need to keep track of which region a bio is doing io for.
90 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, store_io_and_region_in_bio() argument
98 bio->bi_private = (void *)((unsigned long)io | region); store_io_and_region_in_bio()
101 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, retrieve_io_and_region_from_bio() argument
104 unsigned long val = (unsigned long)bio->bi_private; retrieve_io_and_region_from_bio()
137 static void endio(struct bio *bio, int error) endio() argument
142 if (error && bio_data_dir(bio) == READ) endio()
143 zero_fill_bio(bio); endio()
146 * The bio destructor in bio_put() may use the io object. endio()
148 retrieve_io_and_region_from_bio(bio, &io, &region); endio()
150 bio_put(bio); endio()
219 static void bio_dp_init(struct dpages *dp, struct bio *bio) bio_dp_init() argument
223 dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); bio_dp_init()
224 dp->context_u = bio->bi_iter.bi_bvec_done; bio_dp_init()
283 struct bio *bio; do_region() local
312 * Allocate a suitably sized-bio. do_region()
320 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); do_region()
321 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); do_region()
322 bio->bi_bdev = where->bdev; do_region()
323 bio->bi_end_io = endio; do_region()
324 store_io_and_region_in_bio(bio, io, region); do_region()
328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; do_region()
335 bio_add_page(bio, page, logical_block_size, offset); do_region()
337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; do_region()
348 if (!bio_add_page(bio, page, len, offset)) do_region()
357 submit_bio(rw, bio); do_region()
477 bio_dp_init(dp, io_req->mem.ptr.bio); dp_init()
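
store_io_and_region_in_bio() above packs both the io pointer and a small region number into bio->bi_private, relying on the io object's alignment to leave the low bits free. Below is a self-contained sketch of that tagged-pointer trick; REGION_BITS and the struct are illustrative, not dm-io's actual limits.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define REGION_BITS 2
    #define REGION_MASK ((((uintptr_t)1) << REGION_BITS) - 1)

    struct io { int dummy; };

    /* Pack pointer + region into one word, as bi_private is reused above. */
    static void *pack(struct io *io, unsigned region)
    {
        assert(((uintptr_t)io & REGION_MASK) == 0);  /* alignment frees low bits */
        assert(region <= REGION_MASK);
        return (void *)((uintptr_t)io | region);
    }

    static void unpack(void *val, struct io **io, unsigned *region)
    {
        *io     = (struct io *)((uintptr_t)val & ~REGION_MASK);
        *region = (uintptr_t)val & REGION_MASK;
    }

    int main(void)
    {
        struct io *io = aligned_alloc(1u << REGION_BITS, sizeof(*io));
        struct io *out;
        unsigned region;

        unpack(pack(io, 3), &out, &region);
        printf("pointer ok: %d, region = %u\n", out == io, region);
        free(io);
        return 0;
    }
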
H A Ddm-thin.c8 #include "dm-bio-prison.h"
209 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, bio_detain() argument
339 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); bio_detain()
475 struct bio *bio; error_bio_list() local
477 while ((bio = bio_list_pop(bios))) error_bio_list()
478 bio_endio(bio, error); error_bio_list()
550 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) get_bio_block() argument
553 sector_t block_nr = bio->bi_iter.bi_sector; get_bio_block()
563 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) remap() argument
566 sector_t bi_sector = bio->bi_iter.bi_sector; remap()
568 bio->bi_bdev = tc->pool_dev->bdev; remap()
570 bio->bi_iter.bi_sector = remap()
574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + remap()
578 static void remap_to_origin(struct thin_c *tc, struct bio *bio) remap_to_origin() argument
580 bio->bi_bdev = tc->origin_dev->bdev; remap_to_origin()
583 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) bio_triggers_commit() argument
585 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && bio_triggers_commit()
589 static void inc_all_io_entry(struct pool *pool, struct bio *bio) inc_all_io_entry() argument
593 if (bio->bi_rw & REQ_DISCARD) inc_all_io_entry()
596 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); inc_all_io_entry()
600 static void issue(struct thin_c *tc, struct bio *bio) issue() argument
605 if (!bio_triggers_commit(tc, bio)) { issue()
606 generic_make_request(bio); issue()
611 * Complete bio with an error if earlier I/O caused changes to issue()
616 bio_io_error(bio); issue()
625 bio_list_add(&pool->deferred_flush_bios, bio); issue()
629 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) remap_to_origin_and_issue() argument
631 remap_to_origin(tc, bio); remap_to_origin_and_issue()
632 issue(tc, bio); remap_to_origin_and_issue()
635 static void remap_and_issue(struct thin_c *tc, struct bio *bio, remap_and_issue() argument
638 remap(tc, bio, block); remap_and_issue()
639 issue(tc, bio); remap_and_issue()
667 * If the bio covers the whole area of a block then we can avoid
668 * zeroing or copying. Instead this bio is hooked. The bio will
670 * the bio twice.
672 struct bio *bio; member in struct:dm_thin_new_mapping
704 static void overwrite_endio(struct bio *bio, int err) overwrite_endio() argument
706 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); overwrite_endio()
739 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
751 struct bio *bio; __inc_remap_and_issue_cell() local
753 while ((bio = bio_list_pop(&cell->bios))) { __inc_remap_and_issue_cell()
754 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) __inc_remap_and_issue_cell()
755 bio_list_add(&info->defer_bios, bio); __inc_remap_and_issue_cell()
757 inc_all_io_entry(info->tc->pool, bio); __inc_remap_and_issue_cell()
760 * We can't issue the bios with the bio prison lock __inc_remap_and_issue_cell()
764 bio_list_add(&info->issue_bios, bio); __inc_remap_and_issue_cell()
773 struct bio *bio; inc_remap_and_issue_cell() local
788 while ((bio = bio_list_pop(&info.defer_bios))) inc_remap_and_issue_cell()
789 thin_defer_bio(tc, bio); inc_remap_and_issue_cell()
791 while ((bio = bio_list_pop(&info.issue_bios))) inc_remap_and_issue_cell()
792 remap_and_issue(info.tc, bio, block); inc_remap_and_issue_cell()
797 if (m->bio) { process_prepared_mapping_fail()
798 m->bio->bi_end_io = m->saved_bi_end_io; process_prepared_mapping_fail()
799 atomic_inc(&m->bio->bi_remaining); process_prepared_mapping_fail()
810 struct bio *bio; process_prepared_mapping() local
813 bio = m->bio; process_prepared_mapping()
814 if (bio) { process_prepared_mapping()
815 bio->bi_end_io = m->saved_bi_end_io; process_prepared_mapping()
816 atomic_inc(&bio->bi_remaining); process_prepared_mapping()
838 * If we are processing a write bio that completely covers the block, process_prepared_mapping()
842 if (bio) { process_prepared_mapping()
844 bio_endio(bio, 0); process_prepared_mapping()
860 bio_io_error(m->bio); process_prepared_discard_fail()
870 inc_all_io_entry(tc->pool, m->bio); process_prepared_discard_passdown()
876 remap_and_issue(tc, m->bio, m->data_block); process_prepared_discard_passdown()
880 bio_endio(m->bio, 0); process_prepared_discard_passdown()
882 remap_and_issue(tc, m->bio, m->data_block); process_prepared_discard_passdown()
885 bio_endio(m->bio, 0); process_prepared_discard_passdown()
919 * Deferred bio jobs.
921 static int io_overlaps_block(struct pool *pool, struct bio *bio) io_overlaps_block() argument
923 return bio->bi_iter.bi_size == io_overlaps_block()
927 static int io_overwrites_block(struct pool *pool, struct bio *bio) io_overwrites_block() argument
929 return (bio_data_dir(bio) == WRITE) && io_overwrites_block()
930 io_overlaps_block(pool, bio); io_overwrites_block()
933 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, save_and_set_endio() argument
936 *save = bio->bi_end_io; save_and_set_endio()
937 bio->bi_end_io = fn; save_and_set_endio()
958 m->bio = NULL; get_next_mapping()
982 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, remap_and_issue_overwrite() argument
987 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); remap_and_issue_overwrite()
990 m->bio = bio; remap_and_issue_overwrite()
991 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); remap_and_issue_overwrite()
992 inc_all_io_entry(pool, bio); remap_and_issue_overwrite()
993 remap_and_issue(tc, bio, data_block); remap_and_issue_overwrite()
1002 struct dm_bio_prison_cell *cell, struct bio *bio, schedule_copy()
1028 * bio immediately. Otherwise we use kcopyd to clone the data first. schedule_copy()
1030 if (io_overwrites_block(pool, bio)) schedule_copy()
1031 remap_and_issue_overwrite(tc, bio, data_dest, m); schedule_copy()
1073 struct dm_bio_prison_cell *cell, struct bio *bio) schedule_internal_copy()
1076 data_origin, data_dest, cell, bio, schedule_internal_copy()
1082 struct bio *bio) schedule_zero()
1095 * zeroing pre-existing data, we can issue the bio immediately. schedule_zero()
1101 else if (io_overwrites_block(pool, bio)) schedule_zero()
1102 remap_and_issue_overwrite(tc, bio, data_block, m); schedule_zero()
1112 struct dm_bio_prison_cell *cell, struct bio *bio) schedule_external_copy()
1120 virt_block, data_dest, cell, bio, schedule_external_copy()
1125 virt_block, data_dest, cell, bio, schedule_external_copy()
1129 schedule_zero(tc, virt_block, data_dest, cell, bio); schedule_external_copy()
1235 static void retry_on_resume(struct bio *bio) retry_on_resume() argument
1237 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); retry_on_resume()
1242 bio_list_add(&tc->retry_on_resume_list, bio); retry_on_resume()
1253 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); should_error_unserviceable_bio()
1264 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); should_error_unserviceable_bio()
1269 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) handle_unserviceable_bio() argument
1274 bio_endio(bio, error); handle_unserviceable_bio()
1276 retry_on_resume(bio); handle_unserviceable_bio()
1281 struct bio *bio; retry_bios_on_resume() local
1294 while ((bio = bio_list_pop(&bios))) retry_bios_on_resume()
1295 retry_on_resume(bio); retry_bios_on_resume()
1301 struct bio *bio = cell->holder; process_discard_cell() local
1305 dm_block_t block = get_bio_block(tc, bio); process_discard_cell()
1323 if (bio_detain(tc->pool, &key2, bio, &cell2)) { process_discard_cell()
1328 if (io_overlaps_block(pool, bio)) { process_discard_cell()
1341 m->bio = bio; process_discard_cell()
1347 inc_all_io_entry(pool, bio); process_discard_cell()
1357 remap_and_issue(tc, bio, lookup_result.block); process_discard_cell()
1359 bio_endio(bio, 0); process_discard_cell()
1368 bio_endio(bio, 0); process_discard_cell()
1375 bio_io_error(bio); process_discard_cell()
1380 static void process_discard_bio(struct thin_c *tc, struct bio *bio) process_discard_bio() argument
1384 dm_block_t block = get_bio_block(tc, bio); process_discard_bio()
1387 if (bio_detain(tc->pool, &key, bio, &cell)) process_discard_bio()
1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, break_sharing() argument
1406 data_block, cell, bio); break_sharing()
1425 struct bio *bio; __remap_and_issue_shared_cell() local
1427 while ((bio = bio_list_pop(&cell->bios))) { __remap_and_issue_shared_cell()
1428 if ((bio_data_dir(bio) == WRITE) || __remap_and_issue_shared_cell()
1429 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))) __remap_and_issue_shared_cell()
1430 bio_list_add(&info->defer_bios, bio); __remap_and_issue_shared_cell()
1432 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); __remap_and_issue_shared_cell()
1435 inc_all_io_entry(info->tc->pool, bio); __remap_and_issue_shared_cell()
1436 bio_list_add(&info->issue_bios, bio); __remap_and_issue_shared_cell()
1445 struct bio *bio; remap_and_issue_shared_cell() local
1455 while ((bio = bio_list_pop(&info.defer_bios))) remap_and_issue_shared_cell()
1456 thin_defer_bio(tc, bio); remap_and_issue_shared_cell()
1458 while ((bio = bio_list_pop(&info.issue_bios))) remap_and_issue_shared_cell()
1459 remap_and_issue(tc, bio, block); remap_and_issue_shared_cell()
1462 static void process_shared_bio(struct thin_c *tc, struct bio *bio, process_shared_bio() argument
1476 if (bio_detain(pool, &key, bio, &data_cell)) { process_shared_bio()
1481 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { process_shared_bio()
1482 break_sharing(tc, bio, block, &key, lookup_result, data_cell); process_shared_bio()
1485 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); process_shared_bio()
1488 inc_all_io_entry(pool, bio); process_shared_bio()
1489 remap_and_issue(tc, bio, lookup_result->block); process_shared_bio()
1496 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, provision_block() argument
1506 if (!bio->bi_iter.bi_size) { provision_block()
1507 inc_all_io_entry(pool, bio); provision_block()
1510 remap_and_issue(tc, bio, 0); provision_block()
1517 if (bio_data_dir(bio) == READ) { provision_block()
1518 zero_fill_bio(bio); provision_block()
1520 bio_endio(bio, 0); provision_block()
1528 schedule_external_copy(tc, block, data_block, cell, bio); provision_block()
1530 schedule_zero(tc, block, data_block, cell, bio); provision_block()
1549 struct bio *bio = cell->holder; process_cell() local
1550 dm_block_t block = get_bio_block(tc, bio); process_cell()
1562 process_shared_bio(tc, bio, block, &lookup_result, cell); process_cell()
1564 inc_all_io_entry(pool, bio); process_cell()
1565 remap_and_issue(tc, bio, lookup_result.block); process_cell()
1571 if (bio_data_dir(bio) == READ && tc->origin_dev) { process_cell()
1572 inc_all_io_entry(pool, bio); process_cell()
1575 if (bio_end_sector(bio) <= tc->origin_size) process_cell()
1576 remap_to_origin_and_issue(tc, bio); process_cell()
1578 else if (bio->bi_iter.bi_sector < tc->origin_size) { process_cell()
1579 zero_fill_bio(bio); process_cell()
1580 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; process_cell()
1581 remap_to_origin_and_issue(tc, bio); process_cell()
1584 zero_fill_bio(bio); process_cell()
1585 bio_endio(bio, 0); process_cell()
1588 provision_block(tc, bio, block, cell); process_cell()
1595 bio_io_error(bio); process_cell()
1600 static void process_bio(struct thin_c *tc, struct bio *bio) process_bio() argument
1603 dm_block_t block = get_bio_block(tc, bio); process_bio()
1612 if (bio_detain(pool, &key, bio, &cell)) process_bio()
1618 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, __process_bio_read_only() argument
1622 int rw = bio_data_dir(bio); __process_bio_read_only()
1623 dm_block_t block = get_bio_block(tc, bio); __process_bio_read_only()
1629 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { __process_bio_read_only()
1630 handle_unserviceable_bio(tc->pool, bio); __process_bio_read_only()
1634 inc_all_io_entry(tc->pool, bio); __process_bio_read_only()
1635 remap_and_issue(tc, bio, lookup_result.block); __process_bio_read_only()
1645 handle_unserviceable_bio(tc->pool, bio); __process_bio_read_only()
1650 inc_all_io_entry(tc->pool, bio); __process_bio_read_only()
1651 remap_to_origin_and_issue(tc, bio); __process_bio_read_only()
1655 zero_fill_bio(bio); __process_bio_read_only()
1656 bio_endio(bio, 0); __process_bio_read_only()
1664 bio_io_error(bio); __process_bio_read_only()
1669 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) process_bio_read_only() argument
1671 __process_bio_read_only(tc, bio, NULL); process_bio_read_only()
1679 static void process_bio_success(struct thin_c *tc, struct bio *bio) process_bio_success() argument
1681 bio_endio(bio, 0); process_bio_success()
1684 static void process_bio_fail(struct thin_c *tc, struct bio *bio) process_bio_fail() argument
1686 bio_io_error(bio); process_bio_fail()
1712 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) __thin_bio_rb_add() argument
1716 sector_t bi_sector = bio->bi_iter.bi_sector; __thin_bio_rb_add()
1730 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); __thin_bio_rb_add()
1739 struct bio *bio; __extract_sorted_bios() local
1743 bio = thin_bio(pbd); __extract_sorted_bios()
1745 bio_list_add(&tc->deferred_bio_list, bio); __extract_sorted_bios()
1754 struct bio *bio; __sort_thin_deferred_bios() local
1762 while ((bio = bio_list_pop(&bios))) __sort_thin_deferred_bios()
1763 __thin_bio_rb_add(tc, bio); __sort_thin_deferred_bios()
1777 struct bio *bio; process_thin_deferred_bios() local
1804 while ((bio = bio_list_pop(&bios))) { process_thin_deferred_bios()
1807 * this bio might require one, we pause until there are some process_thin_deferred_bios()
1812 bio_list_add(&tc->deferred_bio_list, bio); process_thin_deferred_bios()
1818 if (bio->bi_rw & REQ_DISCARD) process_thin_deferred_bios()
1819 pool->process_discard(tc, bio); process_thin_deferred_bios()
1821 pool->process_bio(tc, bio); process_thin_deferred_bios()
1892 * this bio might require one, we pause until there are some process_thin_deferred_cells()
1955 struct bio *bio; process_deferred_bios() local
1981 while ((bio = bio_list_pop(&bios))) process_deferred_bios()
1982 bio_io_error(bio); process_deferred_bios()
1987 while ((bio = bio_list_pop(&bios))) process_deferred_bios()
1988 generic_make_request(bio); process_deferred_bios()
2237 * Called only while mapping a thin bio to hand it over to the workqueue.
2239 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) thin_defer_bio() argument
2245 bio_list_add(&tc->deferred_bio_list, bio); thin_defer_bio()
2251 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) thin_defer_bio_with_throttle() argument
2256 thin_defer_bio(tc, bio); thin_defer_bio_with_throttle()
2274 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) thin_hook_bio() argument
2276 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); thin_hook_bio()
2287 static int thin_bio_map(struct dm_target *ti, struct bio *bio) thin_bio_map() argument
2291 dm_block_t block = get_bio_block(tc, bio); thin_bio_map()
2297 thin_hook_bio(tc, bio); thin_bio_map()
2300 bio_endio(bio, DM_ENDIO_REQUEUE); thin_bio_map()
2305 bio_io_error(bio); thin_bio_map()
2309 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { thin_bio_map()
2310 thin_defer_bio_with_throttle(tc, bio); thin_bio_map()
2319 if (bio_detain(tc->pool, &key, bio, &virt_cell)) thin_bio_map()
2349 if (bio_detain(tc->pool, &key, bio, &data_cell)) { thin_bio_map()
2354 inc_all_io_entry(tc->pool, bio); thin_bio_map()
2358 remap(tc, bio, result.block); thin_bio_map()
2372 bio_io_error(bio); thin_bio_map()
2557 *error = "Error creating pool's bio prison"; pool_create()
2985 static int pool_map(struct dm_target *ti, struct bio *bio) pool_map() argument
2996 bio->bi_bdev = pt->data_dev->bdev; pool_map()
3877 static int thin_map(struct dm_target *ti, struct bio *bio) thin_map() argument
3879 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); thin_map()
3881 return thin_bio_map(ti, bio); thin_map()
3884 static int thin_endio(struct dm_target *ti, struct bio *bio, int err) thin_endio() argument
3887 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); thin_endio()
999 schedule_copy(struct thin_c *tc, dm_block_t virt_block, struct dm_dev *origin, dm_block_t data_origin, dm_block_t data_dest, struct dm_bio_prison_cell *cell, struct bio *bio, sector_t len) schedule_copy() argument
1071 schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_origin, dm_block_t data_dest, struct dm_bio_prison_cell *cell, struct bio *bio) schedule_internal_copy() argument
1080 schedule_zero(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_block, struct dm_bio_prison_cell *cell, struct bio *bio) schedule_zero() argument
1110 schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_dest, struct dm_bio_prison_cell *cell, struct bio *bio) schedule_external_copy() argument
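
io_overlaps_block() and io_overwrites_block() above decide whether a write covers a whole pool block, which lets dm-thin skip the zero/copy step and simply hook the bio. The sketch below restates those two predicates with a stand-in struct; the field names follow the fragments but this is not the kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9
    #define WRITE 1

    struct mini_bio {
        uint32_t bi_size;   /* bytes left in the I/O */
        int      dir;       /* READ(0) or WRITE(1)   */
    };

    static int io_overlaps_block(uint64_t sectors_per_block,
                                 const struct mini_bio *bio)
    {
        return bio->bi_size == (sectors_per_block << SECTOR_SHIFT);
    }

    static int io_overwrites_block(uint64_t sectors_per_block,
                                   const struct mini_bio *bio)
    {
        /* only a WRITE covering the whole block can overwrite it in place */
        return bio->dir == WRITE && io_overlaps_block(sectors_per_block, bio);
    }

    int main(void)
    {
        struct mini_bio full = { 128u << SECTOR_SHIFT, WRITE };
        struct mini_bio part = {  64u << SECTOR_SHIFT, WRITE };

        printf("full-block write overwrites: %d\n", io_overwrites_block(128, &full));
        printf("partial write overwrites:    %d\n", io_overwrites_block(128, &part));
        return 0;
    }
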
H A Ddm-snap.c176 * in a bio list
201 struct bio *full_bio;
218 static void init_tracked_chunk(struct bio *bio) init_tracked_chunk() argument
220 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); init_tracked_chunk()
224 static bool is_bio_tracked(struct bio *bio) is_bio_tracked() argument
226 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); is_bio_tracked()
230 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) track_chunk() argument
232 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); track_chunk()
242 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) stop_tracking_chunk() argument
244 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); stop_tracking_chunk()
838 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) __release_queued_bios_after_merge()
898 static void flush_bios(struct bio *bio);
902 struct bio *b = NULL; remove_single_exception_chunk()
1039 static void error_bios(struct bio *bio);
1044 struct bio *b = NULL; merge_callback()
1366 static void flush_bios(struct bio *bio) flush_bios() argument
1368 struct bio *n; flush_bios()
1370 while (bio) { flush_bios()
1371 n = bio->bi_next; flush_bios()
1372 bio->bi_next = NULL; flush_bios()
1373 generic_make_request(bio); flush_bios()
1374 bio = n; flush_bios()
1378 static int do_origin(struct dm_dev *origin, struct bio *bio);
1383 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) retry_origin_bios() argument
1385 struct bio *n; retry_origin_bios()
1388 while (bio) { retry_origin_bios()
1389 n = bio->bi_next; retry_origin_bios()
1390 bio->bi_next = NULL; retry_origin_bios()
1391 r = do_origin(s->origin, bio); retry_origin_bios()
1393 generic_make_request(bio); retry_origin_bios()
1394 bio = n; retry_origin_bios()
1401 static void error_bios(struct bio *bio) error_bios() argument
1403 struct bio *n; error_bios()
1405 while (bio) { error_bios()
1406 n = bio->bi_next; error_bios()
1407 bio->bi_next = NULL; error_bios()
1408 bio_io_error(bio); error_bios()
1409 bio = n; error_bios()
1436 struct bio *origin_bios = NULL; pending_complete()
1437 struct bio *snapshot_bios = NULL; pending_complete()
1438 struct bio *full_bio = NULL; pending_complete()
1574 static void full_bio_end_io(struct bio *bio, int error) full_bio_end_io() argument
1576 void *callback_data = bio->bi_private; full_bio_end_io()
1582 struct bio *bio) start_full_bio()
1587 pe->full_bio = bio; start_full_bio()
1588 pe->full_bio_end_io = bio->bi_end_io; start_full_bio()
1589 pe->full_bio_private = bio->bi_private; start_full_bio()
1594 bio->bi_end_io = full_bio_end_io; start_full_bio()
1595 bio->bi_private = callback_data; start_full_bio()
1597 generic_make_request(bio); start_full_bio()
1650 struct bio *bio, chunk_t chunk) remap_exception()
1652 bio->bi_bdev = s->cow->bdev; remap_exception()
1653 bio->bi_iter.bi_sector = remap_exception()
1656 (bio->bi_iter.bi_sector & s->store->chunk_mask); remap_exception()
1659 static int snapshot_map(struct dm_target *ti, struct bio *bio) snapshot_map() argument
1667 init_tracked_chunk(bio); snapshot_map()
1669 if (bio->bi_rw & REQ_FLUSH) { snapshot_map()
1670 bio->bi_bdev = s->cow->bdev; snapshot_map()
1674 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); snapshot_map()
1693 remap_exception(s, e, bio, chunk); snapshot_map()
1702 if (bio_rw(bio) == WRITE) { snapshot_map()
1718 remap_exception(s, e, bio, chunk); snapshot_map()
1730 remap_exception(s, &pe->e, bio, chunk); snapshot_map()
1735 bio->bi_iter.bi_size == snapshot_map()
1739 start_full_bio(pe, bio); snapshot_map()
1743 bio_list_add(&pe->snapshot_bios, bio); snapshot_map()
1753 bio->bi_bdev = s->origin->bdev; snapshot_map()
1754 track_chunk(s, bio, chunk); snapshot_map()
1775 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) snapshot_merge_map() argument
1782 init_tracked_chunk(bio); snapshot_merge_map()
1784 if (bio->bi_rw & REQ_FLUSH) { snapshot_merge_map()
1785 if (!dm_bio_get_target_bio_nr(bio)) snapshot_merge_map()
1786 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1788 bio->bi_bdev = s->cow->bdev; snapshot_merge_map()
1792 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); snapshot_merge_map()
1804 if (bio_rw(bio) == WRITE && snapshot_merge_map()
1808 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1809 bio_list_add(&s->bios_queued_during_merge, bio); snapshot_merge_map()
1814 remap_exception(s, e, bio, chunk); snapshot_merge_map()
1816 if (bio_rw(bio) == WRITE) snapshot_merge_map()
1817 track_chunk(s, bio, chunk); snapshot_merge_map()
1822 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1824 if (bio_rw(bio) == WRITE) { snapshot_merge_map()
1826 return do_origin(s->origin, bio); snapshot_merge_map()
1835 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error) snapshot_end_io() argument
1839 if (is_bio_tracked(bio)) snapshot_end_io()
1840 stop_tracking_chunk(s, bio); snapshot_end_io()
2039 * supplied bio was ignored. The caller may submit it immediately.
2044 * and any supplied bio is added to a list to be submitted once all
2048 struct bio *bio) __origin_write()
2119 * If an origin bio was supplied, queue it to wait for the list_for_each_entry()
2123 if (bio) { list_for_each_entry()
2124 bio_list_add(&pe->origin_bios, bio); list_for_each_entry()
2125 bio = NULL; list_for_each_entry()
2148 * Submit the exception against which the bio is queued last,
2160 static int do_origin(struct dm_dev *origin, struct bio *bio) do_origin() argument
2168 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); do_origin()
2261 static int origin_map(struct dm_target *ti, struct bio *bio) origin_map() argument
2266 bio->bi_bdev = o->dev->bdev; origin_map()
2268 if (unlikely(bio->bi_rw & REQ_FLUSH)) origin_map()
2271 if (bio_rw(bio) != WRITE) origin_map()
2275 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); origin_map()
2277 if (bio_sectors(bio) > available_sectors) origin_map()
2278 dm_accept_partial_bio(bio, available_sectors); origin_map()
2281 return do_origin(o->dev, bio); origin_map()
1581 start_full_bio(struct dm_snap_pending_exception *pe, struct bio *bio) start_full_bio() argument
1649 remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) remap_exception() argument
2047 __origin_write(struct list_head *snapshots, sector_t sector, struct bio *bio) __origin_write() argument
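
flush_bios() and error_bios() above walk a bi_next-chained list, detaching each bio before submitting or failing it so the next pointer cannot be reused underneath them. The sketch below shows that walk with a plain singly linked list; the types and submit() are illustrative only.

    #include <stdio.h>
    #include <stddef.h>

    struct mini_bio {
        struct mini_bio *bi_next;
        int              id;
    };

    static void submit(struct mini_bio *bio)
    {
        printf("submitting bio %d\n", bio->id);
    }

    /* Walk the chain, detaching each entry before handing it on. */
    static void flush_chain(struct mini_bio *bio)
    {
        struct mini_bio *n;

        while (bio) {
            n = bio->bi_next;        /* remember the rest of the chain   */
            bio->bi_next = NULL;     /* detach: submit() may requeue it  */
            submit(bio);
            bio = n;
        }
    }

    int main(void)
    {
        struct mini_bio c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

        flush_chain(&a);
        return 0;
    }
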
H A Ddm-bio-prison.h13 #include <linux/bio.h>
19 * Sometimes we can't deal with a bio straight away. We put them in prison
45 struct bio *holder;
54 * Eventually all bio prison clients should manage their own cell memory.
77 * bio to it.
83 struct bio *inmate,
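
The dm-bio-prison header above describes detaining bios that cannot be serviced yet: the first bio detained under a key becomes the cell's holder, and later bios for the same key queue behind it. The sketch below captures only that shape; the fixed-size waiter array, locking and key comparison are all simplifications, not the dm-bio-prison implementation.

    #include <stdio.h>
    #include <string.h>

    struct mini_bio { int id; };

    struct cell {
        long             key;         /* e.g. the block the bios target    */
        struct mini_bio *holder;      /* first bio detained under this key */
        struct mini_bio *waiters[8];  /* later bios for the same key       */
        int              nr_waiters;
    };

    /* Returns 0 if 'bio' became the holder, 1 if it was queued behind one. */
    static int detain(struct cell *c, long key, struct mini_bio *bio)
    {
        if (!c->holder) {
            c->key = key;
            c->holder = bio;
            return 0;
        }
        c->waiters[c->nr_waiters++] = bio;    /* bounded only for the sketch */
        return 1;
    }

    int main(void)
    {
        struct cell c;
        struct mini_bio a = { 1 }, b = { 2 };

        memset(&c, 0, sizeof(c));
        printf("a detained: %d\n", detain(&c, 42, &a));   /* 0: a holds the cell  */
        printf("b detained: %d\n", detain(&c, 42, &b));   /* 1: b waits behind a  */
        return 0;
    }
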
H A Ddm-verity.c79 mempool_t *vec_mempool; /* mempool of bio vector */
90 /* original values of bio->bi_end_io and bio->bi_private */
354 struct bio *bio = dm_bio_from_per_bio_data(io, verity_verify_io() local
409 struct bio_vec bv = bio_iter_iovec(bio, io->iter); verity_verify_io()
423 bio_advance_iter(bio, &io->iter, len); verity_verify_io()
457 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); verity_finish_io() local
459 bio->bi_end_io = io->orig_bi_end_io; verity_finish_io()
460 bio->bi_private = io->orig_bi_private; verity_finish_io()
462 bio_endio_nodec(bio, error); verity_finish_io()
472 static void verity_end_io(struct bio *bio, int error) verity_end_io() argument
474 struct dm_verity_io *io = bio->bi_private; verity_end_io()
543 * Bio map function. It allocates dm_verity_io structure and bio vector and
546 static int verity_map(struct dm_target *ti, struct bio *bio) verity_map() argument
551 bio->bi_bdev = v->data_dev->bdev; verity_map()
552 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); verity_map()
554 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & verity_map()
560 if (bio_end_sector(bio) >> verity_map()
566 if (bio_data_dir(bio) == WRITE) verity_map()
569 io = dm_per_bio_data(bio, ti->per_bio_data_size); verity_map()
571 io->orig_bi_end_io = bio->bi_end_io; verity_map()
572 io->orig_bi_private = bio->bi_private; verity_map()
573 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); verity_map()
574 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; verity_map()
576 bio->bi_end_io = verity_end_io; verity_map()
577 bio->bi_private = io; verity_map()
578 io->iter = bio->bi_iter; verity_map()
582 generic_make_request(bio); verity_map()
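
verity_map() above converts the bio's position and size into data-block units before queueing verification (io->block and io->n_blocks). The sketch below shows that conversion, assuming data_dev_block_bits is log2 of the block size in bytes and SECTOR_SHIFT is 9; it is illustrative, not the dm-verity code.

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Which data block does the bio start at, and how many does it span? */
    static void bio_to_blocks(uint64_t bi_sector, uint32_t bi_size,
                              unsigned data_dev_block_bits,
                              uint64_t *block, unsigned *n_blocks)
    {
        *block    = bi_sector >> (data_dev_block_bits - SECTOR_SHIFT);
        *n_blocks = bi_size   >> data_dev_block_bits;
    }

    int main(void)
    {
        uint64_t block;
        unsigned n_blocks;

        /* 16 KiB read at sector 64, with 4 KiB (2^12 byte) verity blocks */
        bio_to_blocks(64, 16 * 1024, 12, &block, &n_blocks);
        printf("block %llu, %u blocks\n", (unsigned long long)block, n_blocks);
        return 0;
    }
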
H A Ddm.c16 #include <linux/bio.h>
65 * For bio-based dm.
66 * One of these is allocated per bio.
72 struct bio *bio; member in struct:dm_io
92 * For request-based dm - the bio clones we allocate are embedded in these
96 * the bioset is created - this means the bio has to come at the end of the
100 struct bio *orig;
102 struct bio clone;
213 struct bio flush_bio;
643 struct bio *bio = io->bio; start_io_acct() local
645 int rw = bio_data_dir(bio); start_io_acct()
656 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, start_io_acct()
657 bio_sectors(bio), false, 0, &io->stats_aux); start_io_acct()
663 struct bio *bio = io->bio; end_io_acct() local
666 int rw = bio_data_dir(bio); end_io_acct()
671 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, end_io_acct()
672 bio_sectors(bio), true, duration, &io->stats_aux); end_io_acct()
675 * After this is decremented the bio must not be touched if it is end_io_acct()
688 * Add the bio to the list of deferred io.
690 static void queue_io(struct mapped_device *md, struct bio *bio) queue_io() argument
695 bio_list_add(&md->deferred, bio); queue_io()
894 * Decrements the number of outstanding ios that a bio has been
901 struct bio *bio; dec_pending() local
919 bio_list_add_head(&md->deferred, io->bio); dec_pending()
927 bio = io->bio; dec_pending()
934 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { dec_pending()
939 bio->bi_rw &= ~REQ_FLUSH; dec_pending()
940 queue_io(md, bio); dec_pending()
943 trace_block_bio_complete(md->queue, bio, io_error); dec_pending()
944 bio_endio(bio, io_error); dec_pending()
957 static void clone_endio(struct bio *bio, int error) clone_endio() argument
960 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); clone_endio()
965 if (!bio_flagged(bio, BIO_UPTODATE) && !error) clone_endio()
969 r = endio(tio->ti, bio, error); clone_endio()
985 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && clone_endio()
986 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) clone_endio()
996 static void end_clone_bio(struct bio *clone, int error) end_clone_bio()
1001 struct bio *bio = info->orig; end_clone_bio() local
1024 * I/O for the bio successfully completed. end_clone_bio()
1030 * So the completing bio should always be rq->bio. end_clone_bio()
1033 if (tio->orig->bio != bio) end_clone_bio()
1034 DMERR("bio completion is going in the middle of the request"); end_clone_bio()
1392 * allowed for all bio types except REQ_FLUSH.
1395 * additional n_sectors sectors of the bio and the rest of the data should be
1396 * sent in a next bio.
1409 * Region 2 is the remaining bio size that the target wants to process.
1412 * The target requires that region 3 is to be sent in the next bio.
1414 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1416 * copies of the bio.
1418 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) dm_accept_partial_bio() argument
1420 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); dm_accept_partial_bio()
1421 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; dm_accept_partial_bio()
1422 BUG_ON(bio->bi_rw & REQ_FLUSH); dm_accept_partial_bio()
1426 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; dm_accept_partial_bio()
1435 struct bio *clone = &tio->clone; __map_bio()
1449 /* the bio has been remapped so dispatch it */ __map_bio()
1452 tio->io->bio->bi_bdev->bd_dev, sector); __map_bio()
1469 struct bio *bio; member in struct:clone_info
1475 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) bio_setup_sector() argument
1477 bio->bi_iter.bi_sector = sector; bio_setup_sector()
1478 bio->bi_iter.bi_size = to_bytes(len); bio_setup_sector()
1482 * Creates a bio that consists of range of complete bvecs.
1484 static void clone_bio(struct dm_target_io *tio, struct bio *bio, clone_bio() argument
1487 struct bio *clone = &tio->clone; clone_bio()
1489 __bio_clone_fast(clone, bio); clone_bio()
1491 if (bio_integrity(bio)) clone_bio()
1492 bio_integrity_clone(clone, bio, GFP_NOIO); clone_bio()
1497 if (bio_integrity(bio)) clone_bio()
1506 struct bio *clone; alloc_tio()
1523 struct bio *clone = &tio->clone; __clone_and_map_simple_bio()
1527 __bio_clone_fast(clone, ci->bio); __clone_and_map_simple_bio()
1548 BUG_ON(bio_has_data(ci->bio)); __send_empty_flush()
1558 struct bio *bio = ci->bio; __clone_and_map_data_bio() local
1564 * Does the target want to receive duplicate copies of the bio? __clone_and_map_data_bio()
1566 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) __clone_and_map_data_bio()
1567 num_target_bios = ti->num_write_bios(ti, bio); __clone_and_map_data_bio()
1572 clone_bio(tio, bio, sector, *len); __clone_and_map_data_bio()
1644 * Select the correct strategy for processing a non-flush bio.
1648 struct bio *bio = ci->bio; __split_and_process_non_flush() local
1652 if (unlikely(bio->bi_rw & REQ_DISCARD)) __split_and_process_non_flush()
1654 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) __split_and_process_non_flush()
1672 * Entry point to split a bio into clones and submit them to the targets.
1675 struct dm_table *map, struct bio *bio) __split_and_process_bio()
1681 bio_io_error(bio); __split_and_process_bio()
1690 ci.io->bio = bio; __split_and_process_bio()
1693 ci.sector = bio->bi_iter.bi_sector; __split_and_process_bio()
1697 if (bio->bi_rw & REQ_FLUSH) { __split_and_process_bio()
1698 ci.bio = &ci.md->flush_bio; __split_and_process_bio()
1703 ci.bio = bio; __split_and_process_bio()
1704 ci.sector_count = bio_sectors(bio); __split_and_process_bio()
1771 * The request function that just remaps the bio built up by
1774 static void dm_make_request(struct request_queue *q, struct bio *bio) dm_make_request() argument
1776 int rw = bio_data_dir(bio); dm_make_request()
1783 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); dm_make_request()
1789 if (bio_rw(bio) != READA) dm_make_request()
1790 queue_io(md, bio); dm_make_request()
1792 bio_io_error(bio); dm_make_request()
1796 __split_and_process_bio(md, map, bio); dm_make_request()
1820 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, dm_rq_bio_constructor() argument
1825 container_of(bio, struct dm_rq_clone_bio_info, clone); dm_rq_bio_constructor()
1829 bio->bi_end_io = end_clone_bio; dm_rq_bio_constructor()
2123 md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && dm_request_fn()
2228 * Request-based dm devices cannot be stacked on top of bio-based dm dm_init_md_queue()
2424 * to bio from the old bioset, so you must walk __bind_mempools()
3020 struct bio *c; dm_wq_work()
3628 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
1674 __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) __split_and_process_bio() argument
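
dm_accept_partial_bio() above lets a target keep only the first n_sectors of a bio (region 2) and have the remainder (region 3) resubmitted as a new bio. The sketch below shows the trim plus the leftover calculation with a stand-in struct; it is a simplification of the idea, not the dm core code.

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    struct mini_bio {
        uint64_t bi_sector;
        uint32_t bi_size;     /* bytes */
    };

    /* Keep only n_sectors of the bio; return how many sectors are left over. */
    static uint64_t accept_partial(struct mini_bio *bio, uint64_t n_sectors)
    {
        uint64_t total = bio->bi_size >> SECTOR_SHIFT;

        bio->bi_size = n_sectors << SECTOR_SHIFT;   /* region 2: processed now  */
        return total - n_sectors;                   /* region 3: next bio's job */
    }

    int main(void)
    {
        struct mini_bio bio = { 100, 64u << SECTOR_SHIFT };
        uint64_t rest = accept_partial(&bio, 24);

        printf("process %u bytes now, %llu sectors resubmitted later\n",
               bio.bi_size, (unsigned long long)rest);
        return 0;
    }
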
H A Draid10.h86 * this is our 'private' RAID10 bio.
101 * original bio going to /dev/mdx
103 struct bio *master_bio;
116 * We sometimes need an extra bio to write to the replacement.
119 struct bio *bio; member in struct:r10bio::r10dev
121 struct bio *repl_bio; /* used for resync and
H A Ddm-bio-prison.c8 #include "dm-bio-prison.h"
75 struct bio *holder, __setup_new_cell()
109 struct bio *inmate, __bio_detain()
146 struct bio *inmate, bio_detain()
162 struct bio *inmate, dm_bio_detain()
234 struct bio *bio; dm_cell_error() local
239 while ((bio = bio_list_pop(&bios))) dm_cell_error()
240 bio_endio(bio, error); dm_cell_error()
394 MODULE_DESCRIPTION(DM_NAME " bio prison");
H A Ddm-cache-policy-internal.h19 struct bio *bio, struct policy_locker *locker, policy_map()
22 return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result); policy_map()
17 policy_map(struct dm_cache_policy *p, dm_oblock_t oblock, bool can_block, bool can_migrate, bool discarded_oblock, struct bio *bio, struct policy_locker *locker, struct policy_result *result) policy_map() argument
H A Ddm-target.c12 #include <linux/bio.h>
129 static int io_err_map(struct dm_target *tt, struct bio *bio) io_err_map() argument
H A Ddm-region-hash.c127 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) dm_rh_bio_to_region() argument
129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - dm_rh_bio_to_region()
387 * @bio
389 * The bio was written on some mirror(s) but failed on other mirror(s).
390 * We can successfully endio the bio but should avoid the region being
395 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) dm_rh_mark_nosync() argument
400 region_t region = dm_rh_bio_to_region(rh, bio); dm_rh_mark_nosync()
403 if (bio->bi_rw & REQ_FLUSH) { dm_rh_mark_nosync()
408 if (bio->bi_rw & REQ_DISCARD) dm_rh_mark_nosync()
528 struct bio *bio; dm_rh_inc_pending() local
530 for (bio = bios->head; bio; bio = bio->bi_next) { dm_rh_inc_pending()
531 if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) dm_rh_inc_pending()
533 rh_inc(rh, dm_rh_bio_to_region(rh, bio)); dm_rh_inc_pending()
690 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) dm_rh_delay() argument
695 reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio)); dm_rh_delay()
696 bio_list_add(&reg->delayed_bios, bio); dm_rh_delay()
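
dm_rh_bio_to_region() above maps a bio's sector, taken relative to the target start, to a region number. With a power-of-two region size this is a subtraction and a shift; the sketch below is illustrative and the region_shift parameter is an assumption about how the region size is expressed.

    #include <stdio.h>
    #include <stdint.h>

    /* region_shift = log2(region size in sectors) */
    static uint64_t sector_to_region(uint64_t bi_sector, uint64_t target_begin,
                                     unsigned region_shift)
    {
        return (bi_sector - target_begin) >> region_shift;
    }

    int main(void)
    {
        /* 1 MiB regions (2048 sectors, shift 11), target starting at sector 4096 */
        printf("region = %llu\n",
               (unsigned long long)sector_to_region(10000, 4096, 11));
        return 0;
    }
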
H A Draid0.c304 * remaps the bio to the target device. we separate two flows.
331 * position the bio over the real device map_sector()
341 * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
343 * @bvm: properties of new bio
497 unsigned int chunk_sects, struct bio *bio) is_io_in_chunk_boundary()
501 ((bio->bi_iter.bi_sector & (chunk_sects-1)) is_io_in_chunk_boundary()
502 + bio_sectors(bio)); is_io_in_chunk_boundary()
504 sector_t sector = bio->bi_iter.bi_sector; is_io_in_chunk_boundary()
506 + bio_sectors(bio)); is_io_in_chunk_boundary()
510 static void raid0_make_request(struct mddev *mddev, struct bio *bio) raid0_make_request() argument
514 struct bio *split; raid0_make_request()
516 if (unlikely(bio->bi_rw & REQ_FLUSH)) { raid0_make_request()
517 md_flush_request(mddev, bio); raid0_make_request()
522 sector_t sector = bio->bi_iter.bi_sector; raid0_make_request()
531 sector = bio->bi_iter.bi_sector; raid0_make_request()
533 if (sectors < bio_sectors(bio)) { raid0_make_request()
534 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); raid0_make_request()
535 bio_chain(split, bio); raid0_make_request()
537 split = bio; raid0_make_request()
552 } while (split != bio); raid0_make_request()
496 is_io_in_chunk_boundary(struct mddev *mddev, unsigned int chunk_sects, struct bio *bio) is_io_in_chunk_boundary() argument
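Note: the raid0_make_request() fragment above (source lines 510-552) handles bios that cross a chunk boundary by carving off the leading piece with bio_split(), chaining it to the parent with bio_chain(), and resubmitting until nothing is left. A minimal sketch of that split-and-chain loop under the 4.1 API follows; split_and_resubmit() and the max_sectors_here() callback are illustrative assumptions, not md symbols.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Split-and-chain loop in the style of raid0_make_request().
 * max_sectors_here() is a hypothetical callback returning how many
 * sectors can be issued contiguously from the bio's current sector.
 */
static void split_and_resubmit(struct bio *bio,
			       unsigned int (*max_sectors_here)(sector_t))
{
	struct bio *split;

	do {
		unsigned int sectors = max_sectors_here(bio->bi_iter.bi_sector);

		if (sectors < bio_sectors(bio)) {
			/* Front part goes out now; the remainder stays in 'bio'. */
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			/* Parent completes only after the split piece does. */
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		generic_make_request(split);
	} while (split != bio);
}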
H A Ddm-bufio.c150 struct bio bio; member in struct:dm_buffer
532 * it is not vmalloced, try using the bio interface.
535 * rejects the bio because it is too large, use dm-io layer to do the I/O.
541 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
542 * that the request was handled directly with bio interface.
548 b->bio.bi_end_io(&b->bio, error ? -EIO : 0); dmio_complete()
575 b->bio.bi_end_io = end_io; use_dmio()
579 end_io(&b->bio, r); use_dmio()
582 static void inline_endio(struct bio *bio, int error) inline_endio() argument
584 bio_end_io_t *end_fn = bio->bi_private; inline_endio()
587 * Reset the bio to free any attached resources inline_endio()
588 * (e.g. bio integrity profiles). inline_endio()
590 bio_reset(bio); inline_endio()
592 end_fn(bio, error); inline_endio()
601 bio_init(&b->bio); use_inline_bio()
602 b->bio.bi_io_vec = b->bio_vec; use_inline_bio()
603 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; use_inline_bio()
604 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; use_inline_bio()
605 b->bio.bi_bdev = b->c->bdev; use_inline_bio()
606 b->bio.bi_end_io = inline_endio; use_inline_bio()
609 * the dm_buffer's inline bio is local to bufio. use_inline_bio()
611 b->bio.bi_private = end_io; use_inline_bio()
626 if (!bio_add_page(&b->bio, virt_to_page(ptr), use_inline_bio()
638 submit_bio(rw, &b->bio); use_inline_bio()
664 static void write_endio(struct bio *bio, int error) write_endio() argument
666 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); write_endio()
1029 static void read_endio(struct bio *bio, int error) read_endio() argument
1031 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); read_endio()
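Note: dm-bufio embeds a struct bio inside struct dm_buffer (source line 150) so that write_endio()/read_endio() can recover the owning buffer with container_of(). A minimal sketch of that embed-and-recover pattern with the 4.1 end_io signature follows; struct my_buffer, my_endio() and my_buffer_submit() are illustrative names, not bufio symbols.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical container with an embedded bio, loosely mirroring struct dm_buffer. */
struct my_buffer {
	int		io_error;
	struct bio_vec	vec;		/* single inline biovec for the bio below */
	struct bio	bio;		/* embedded; recovered via container_of() */
};

/* Completion handler: get back from the bio to its container. */
static void my_endio(struct bio *bio, int error)
{
	struct my_buffer *b = container_of(bio, struct my_buffer, bio);

	b->io_error = error;
}

/* Initialize the embedded bio for a one-page transfer and submit it. */
static void my_buffer_submit(struct my_buffer *b, struct block_device *bdev,
			     sector_t sector, struct page *page, int rw)
{
	bio_init(&b->bio);			/* 4.1: bio_init() takes only the bio */
	b->bio.bi_io_vec = &b->vec;
	b->bio.bi_max_vecs = 1;
	b->bio.bi_bdev = bdev;
	b->bio.bi_iter.bi_sector = sector;
	b->bio.bi_end_io = my_endio;
	bio_add_page(&b->bio, page, PAGE_SIZE, 0);
	submit_bio(rw, &b->bio);
}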
H A Ddm-era-target.c1184 static dm_block_t get_block(struct era *era, struct bio *bio) get_block() argument
1186 sector_t block_nr = bio->bi_iter.bi_sector; get_block()
1196 static void remap_to_origin(struct era *era, struct bio *bio) remap_to_origin() argument
1198 bio->bi_bdev = era->origin_dev->bdev; remap_to_origin()
1230 struct bio *bio; process_deferred_bios() local
1242 while ((bio = bio_list_pop(&deferred_bios))) { process_deferred_bios()
1245 get_block(era, bio)); process_deferred_bios()
1256 bio_list_add(&marked_bios, bio); process_deferred_bios()
1266 while ((bio = bio_list_pop(&marked_bios))) process_deferred_bios()
1267 bio_io_error(bio); process_deferred_bios()
1269 while ((bio = bio_list_pop(&marked_bios))) process_deferred_bios()
1270 generic_make_request(bio); process_deferred_bios()
1319 static void defer_bio(struct era *era, struct bio *bio) defer_bio() argument
1322 bio_list_add(&era->deferred_bios, bio); defer_bio()
1531 static int era_map(struct dm_target *ti, struct bio *bio) era_map() argument
1534 dm_block_t block = get_block(era, bio); era_map()
1541 remap_to_origin(era, bio); era_map()
1546 if (!(bio->bi_rw & REQ_FLUSH) && era_map()
1547 (bio_data_dir(bio) == WRITE) && era_map()
1549 defer_bio(era, bio); era_map()
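Note: defer_bio()/process_deferred_bios() above show dm-era's defer-then-drain pattern: the map path queues bios on a spinlock-protected bio_list and a worker later empties it, erroring or reissuing each bio. A minimal sketch of that pattern under the 4.1 API follows; struct deferred_ctx, defer_one() and drain_deferred() are illustrative assumptions.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical context holding deferred bios, loosely mirroring struct era. */
struct deferred_ctx {
	spinlock_t	lock;
	struct bio_list	deferred;
};

/* Fast path: queue the bio for the worker. */
static void defer_one(struct deferred_ctx *ctx, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	bio_list_add(&ctx->deferred, bio);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* Worker: steal the whole list under the lock, then act on each bio. */
static void drain_deferred(struct deferred_ctx *ctx, bool fail)
{
	struct bio_list local;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&local);

	spin_lock_irqsave(&ctx->lock, flags);
	bio_list_merge(&local, &ctx->deferred);
	bio_list_init(&ctx->deferred);
	spin_unlock_irqrestore(&ctx->lock, flags);

	while ((bio = bio_list_pop(&local))) {
		if (fail)
			bio_io_error(bio);		/* completes with -EIO */
		else
			generic_make_request(bio);	/* reissue downstream */
	}
}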
H A Draid1.h112 * this is our 'private' RAID1 bio.
131 * original bio going to /dev/mdx
133 struct bio *master_bio;
147 struct bio *bios[0];
H A Ddm-cache-policy.h24 * When the core target has to remap a bio it calls the 'map' method of the
52 * - remap bio to cache and reissue.
130 * bio - the bio that triggered this call.
137 struct bio *bio, struct policy_locker *locker,
H A Ddm-crypt.c15 #include <linux/bio.h>
41 struct bio *bio_in;
42 struct bio *bio_out;
51 * per bio private data
55 struct bio *base_bio;
125 * pool for per bio private data, crypto requests and
184 static void clone_init(struct dm_crypt_io *, struct bio *);
804 struct bio *bio_out, struct bio *bio_in, crypt_convert_init()
900 struct ablkcipher_request *req, struct bio *base_bio) crypt_free_req()
909 * Encrypt / decrypt data from one bio to another one (can be the same one)
954 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
957 * Generate a new unfragmented bio with the given size
973 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) crypt_alloc_buffer()
976 struct bio *clone; crypt_alloc_buffer()
1023 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) crypt_free_buffer_pages()
1036 struct bio *bio, sector_t sector) crypt_io_init()
1039 io->base_bio = bio; crypt_io_init()
1058 struct bio *base_bio = io->base_bio; crypt_dec_pending()
1087 static void crypt_endio(struct bio *clone, int error) crypt_endio()
1115 static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone_init()
1128 struct bio *clone; kcryptd_io_read()
1132 * the whole bio data *afterwards* -- thanks to immutable kcryptd_io_read()
1169 struct bio *clone = io->ctx.bio_out; kcryptd_io_write()
1235 struct bio *clone = io->ctx.bio_out; kcryptd_crypt_write_io_submit()
1248 /* crypt_convert should have filled the clone bio */ kcryptd_crypt_write_io_submit()
1279 struct bio *clone; kcryptd_crypt_write_convert()
1883 static int crypt_map(struct dm_target *ti, struct bio *bio) crypt_map() argument
1889 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. crypt_map()
1893 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { crypt_map()
1894 bio->bi_bdev = cc->dev->bdev; crypt_map()
1895 if (bio_sectors(bio)) crypt_map()
1896 bio->bi_iter.bi_sector = cc->start + crypt_map()
1897 dm_target_offset(ti, bio->bi_iter.bi_sector); crypt_map()
1901 io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_map()
1902 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); crypt_map()
2050 * bio that are not as physically contiguous as the original bio. crypt_io_hints()
1035 crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, struct bio *bio, sector_t sector) crypt_io_init() argument
H A Ddm-cache-policy-mq.c74 static void iot_update_stats(struct io_tracker *t, struct bio *bio) iot_update_stats() argument
76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) iot_update_stats()
91 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); iot_update_stats()
113 static void iot_examine_bio(struct io_tracker *t, struct bio *bio) iot_examine_bio() argument
115 iot_update_stats(t, bio); iot_examine_bio()
704 * would add extra latency to the triggering bio as it demote_cblock()
1029 struct bio *bio, struct policy_locker *locker, mq_map()
1044 iot_examine_bio(&mq->tracker, bio); mq_map()
1046 bio_data_dir(bio), locker, result); mq_map()
1027 mq_map(struct dm_cache_policy *p, dm_oblock_t oblock, bool can_block, bool can_migrate, bool discarded_oblock, struct bio *bio, struct policy_locker *locker, struct policy_result *result) mq_map() argument
H A Ddm.h96 * To check whether the target type is bio-based or not (request-based).
101 * To check whether the target type is request-based or not (bio-based).
108 * either request-based or bio-based).
H A Draid5.c129 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
130 * order without overlap. There may be several bio's per stripe+device, and
131 * a bio could span several devices.
133 * beyond a bio that extends past this device, as the next bio might no longer
135 * This function is used to determine the 'next' bio in the list, given the sector
138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) r5_next_bio() argument
140 int sectors = bio_sectors(bio); r5_next_bio()
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) r5_next_bio()
142 return bio->bi_next; r5_next_bio()
151 static inline int raid5_bi_processed_stripes(struct bio *bio) raid5_bi_processed_stripes() argument
153 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; raid5_bi_processed_stripes()
157 static inline int raid5_dec_bi_active_stripes(struct bio *bio) raid5_dec_bi_active_stripes() argument
159 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; raid5_dec_bi_active_stripes()
163 static inline void raid5_inc_bi_active_stripes(struct bio *bio) raid5_inc_bi_active_stripes() argument
165 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; raid5_inc_bi_active_stripes()
169 static inline void raid5_set_bi_processed_stripes(struct bio *bio, raid5_set_bi_processed_stripes() argument
172 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; raid5_set_bi_processed_stripes()
181 static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) raid5_set_bi_stripes() argument
183 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; raid5_set_bi_stripes()
226 static void return_io(struct bio *return_bi) return_io()
228 struct bio *bi = return_bi; return_io()
879 raid5_end_read_request(struct bio *bi, int error);
881 raid5_end_write_request(struct bio *bi, int error);
894 struct bio *bi, *rbi; ops_run_io()
1104 async_copy_data(int frombio, struct bio *bio, struct page **page, async_copy_data() argument
1115 if (bio->bi_iter.bi_sector >= sector) async_copy_data()
1116 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; async_copy_data()
1118 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; async_copy_data()
1124 bio_for_each_segment(bvl, bio, iter) { bio_for_each_segment()
1169 struct bio *return_bi = NULL; ops_complete_biofill()
1185 struct bio *rbi, *rbi2; ops_complete_biofill()
1222 struct bio *rbi; ops_run_biofill()
1603 struct bio *chosen; ops_run_biodrain()
1607 struct bio *wbi; ops_run_biodrain()
2286 static void raid5_end_read_request(struct bio * bi, int error) raid5_end_read_request()
2409 static void raid5_end_write_request(struct bio *bi, int error) raid5_end_write_request()
2953 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, add_stripe_bio()
2956 struct bio **bip; add_stripe_bio()
2965 * If several bio share a stripe. The bio bi_phys_segments acts as a add_stripe_bio()
2968 * make_request()), so other bio sharing this stripe will not free the add_stripe_bio()
3075 struct bio **return_bi) handle_failed_stripe()
3080 struct bio *bi; handle_failed_stripe()
3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe()
3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe()
3163 struct bio *nextbi = handle_failed_stripe()
3448 struct stripe_head *sh, int disks, struct bio **return_bi) handle_stripe_clean_event()
3464 struct bio *wbi, *wbi2; handle_stripe_clean_event()
3519 * SCSI discard will change some bio fields and the stripe has handle_stripe_clean_event()
4702 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in_chunk_boundary() argument
4704 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); in_chunk_boundary()
4706 unsigned int bio_sectors = bio_sectors(bio); in_chunk_boundary()
4715 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
4718 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) add_bio_to_retry()
4731 static struct bio *remove_bio_from_retry(struct r5conf *conf) remove_bio_from_retry()
4733 struct bio *bi; remove_bio_from_retry()
4756 * did, call bio_endio on the original bio (having bio_put the new bio
4760 static void raid5_align_endio(struct bio *bi, int error) raid5_align_endio()
4762 struct bio* raid_bi = bi->bi_private; raid5_align_endio()
4791 static int bio_fits_rdev(struct bio *bi) bio_fits_rdev()
4810 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) chunk_aligned_read()
4814 struct bio* align_bi; chunk_aligned_read()
4823 * use bio_clone_mddev to make a copy of the bio chunk_aligned_read()
4830 * original bio. chunk_aligned_read()
5056 static void make_discard_request(struct mddev *mddev, struct bio *bi) make_discard_request()
5148 static void make_request(struct mddev *mddev, struct bio * bi) make_request()
5659 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) retry_aligned_read()
5661 /* We may not be able to submit a whole bio at once as there retry_aligned_read()
5824 struct bio *bio; raid5d() local
5843 while ((bio = remove_bio_from_retry(conf))) { raid5d()
5846 ok = retry_aligned_read(conf, bio); raid5d()
6575 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
H A Ddm-switch.c319 static int switch_map(struct dm_target *ti, struct bio *bio) switch_map() argument
322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); switch_map()
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; switch_map()
H A Dmd.h455 struct bio *flush_bio;
498 void (*make_request)(struct mddev *mddev, struct bio *bio);
634 extern void md_write_start(struct mddev *mddev, struct bio *bi);
641 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
666 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
668 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
H A Draid5.h241 struct bio req, rreq;
244 struct bio *toread, *read, *towrite, *written;
268 struct bio *return_bi;
285 R5_ReadNoMerge, /* prevent bio from merging in block-layer */
293 R5_Wantfill, /* dev->toread contains a bio that needs
310 R5_SkipCopy, /* Don't copy data from bio to stripe cache */
469 struct bio *retry_read_aligned; /* currently retrying aligned bios */
470 struct bio *retry_read_aligned_list; /* aligned bios retry list */
476 int skip_copy; /* Don't copy data from bio to stripe cache */
H A Dmd.c161 * like bio_clone, but with a local bio set
164 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, bio_alloc_mddev()
167 struct bio *b; bio_alloc_mddev()
179 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, bio_clone_mddev() argument
183 return bio_clone(bio, gfp_mask); bio_clone_mddev()
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set); bio_clone_mddev()
250 * call has finished, the bio has been linked into some internal structure
253 static void md_make_request(struct request_queue *q, struct bio *bio) md_make_request() argument
255 const int rw = bio_data_dir(bio); md_make_request()
262 bio_io_error(bio); md_make_request()
266 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS); md_make_request()
288 * save the sectors now since our bio can md_make_request()
291 sectors = bio_sectors(bio); md_make_request()
292 /* bio could be mergeable after passing to underlayer */ md_make_request()
293 bio->bi_rw &= ~REQ_NOMERGE; md_make_request()
294 mddev->pers->make_request(mddev, bio); md_make_request()
382 static void md_end_flush(struct bio *bio, int err) md_end_flush() argument
384 struct md_rdev *rdev = bio->bi_private; md_end_flush()
393 bio_put(bio); md_end_flush()
413 struct bio *bi; rdev_for_each_rcu()
434 struct bio *bio = mddev->flush_bio; md_submit_flush_data() local
436 if (bio->bi_iter.bi_size == 0) md_submit_flush_data()
438 bio_endio(bio, 0); md_submit_flush_data()
440 bio->bi_rw &= ~REQ_FLUSH; md_submit_flush_data()
441 mddev->pers->make_request(mddev, bio); md_submit_flush_data()
448 void md_flush_request(struct mddev *mddev, struct bio *bio) md_flush_request() argument
454 mddev->flush_bio = bio; md_flush_request()
733 static void super_written(struct bio *bio, int error) super_written() argument
735 struct md_rdev *rdev = bio->bi_private; super_written()
738 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { super_written()
740 error, test_bit(BIO_UPTODATE, &bio->bi_flags)); super_written()
741 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags)); super_written()
747 bio_put(bio); super_written()
759 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); md_super_write() local
761 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; md_super_write()
762 bio->bi_iter.bi_sector = sector; md_super_write()
763 bio_add_page(bio, page, size, 0); md_super_write()
764 bio->bi_private = rdev; md_super_write()
765 bio->bi_end_io = super_written; md_super_write()
768 submit_bio(WRITE_FLUSH_FUA, bio); md_super_write()
780 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); sync_page_io() local
783 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? sync_page_io()
786 bio->bi_iter.bi_sector = sector + rdev->sb_start; sync_page_io()
790 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; sync_page_io()
792 bio->bi_iter.bi_sector = sector + rdev->data_offset; sync_page_io()
793 bio_add_page(bio, page, size, 0); sync_page_io()
794 submit_bio_wait(rw, bio); sync_page_io()
796 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); sync_page_io()
797 bio_put(bio); sync_page_io()
7511 void md_write_start(struct mddev *mddev, struct bio *bi) md_write_start()
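Note: sync_page_io() above (source lines 780-797) is the synchronous single-page pattern: allocate a one-vec bio, point it at the device, add the page, block in submit_bio_wait(), then check BIO_UPTODATE and drop the reference. A minimal sketch of the same shape, assuming the 4.1 API; rw_one_page_sync() is an illustrative name and plain bio_alloc() stands in for md's bio_alloc_mddev().

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Synchronous one-page I/O in the style of sync_page_io().
 */
static bool rw_one_page_sync(struct block_device *bdev, sector_t sector,
			     struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	bool ok;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	submit_bio_wait(rw, bio);	/* 4.1: blocks until the bio completes */

	ok = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ok;
}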
H A Ddm-cache-policy-cleaner.c174 struct bio *bio, struct policy_locker *locker, wb_map()
172 wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock, bool can_block, bool can_migrate, bool discarded_oblock, struct bio *bio, struct policy_locker *locker, struct policy_result *result) wb_map() argument
/linux-4.1.27/fs/logfs/
H A Ddev_bdev.c9 #include <linux/bio.h>
19 struct bio bio; sync_request() local
22 bio_init(&bio); sync_request()
23 bio.bi_max_vecs = 1; sync_request()
24 bio.bi_io_vec = &bio_vec; sync_request()
28 bio.bi_vcnt = 1; sync_request()
29 bio.bi_bdev = bdev; sync_request()
30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); sync_request()
31 bio.bi_iter.bi_size = PAGE_SIZE; sync_request()
33 return submit_bio_wait(rw, &bio); sync_request()
56 static void writeseg_end_io(struct bio *bio, int err) writeseg_end_io() argument
58 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); writeseg_end_io()
61 struct super_block *sb = bio->bi_private; writeseg_end_io()
67 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
71 bio_put(bio);
81 struct bio *bio; __bdev_writeseg() local
88 bio = bio_alloc(GFP_NOFS, max_pages); __bdev_writeseg()
89 BUG_ON(!bio); __bdev_writeseg()
94 bio->bi_vcnt = i; __bdev_writeseg()
95 bio->bi_iter.bi_size = i * PAGE_SIZE; __bdev_writeseg()
96 bio->bi_bdev = super->s_bdev; __bdev_writeseg()
97 bio->bi_iter.bi_sector = ofs >> 9; __bdev_writeseg()
98 bio->bi_private = sb; __bdev_writeseg()
99 bio->bi_end_io = writeseg_end_io; __bdev_writeseg()
101 submit_bio(WRITE, bio); __bdev_writeseg()
108 bio = bio_alloc(GFP_NOFS, max_pages); __bdev_writeseg()
109 BUG_ON(!bio); __bdev_writeseg()
113 bio->bi_io_vec[i].bv_page = page; __bdev_writeseg()
114 bio->bi_io_vec[i].bv_len = PAGE_SIZE; __bdev_writeseg()
115 bio->bi_io_vec[i].bv_offset = 0; __bdev_writeseg()
121 bio->bi_vcnt = nr_pages; __bdev_writeseg()
122 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; __bdev_writeseg()
123 bio->bi_bdev = super->s_bdev; __bdev_writeseg()
124 bio->bi_iter.bi_sector = ofs >> 9; __bdev_writeseg()
125 bio->bi_private = sb; __bdev_writeseg()
126 bio->bi_end_io = writeseg_end_io; __bdev_writeseg()
128 submit_bio(WRITE, bio); __bdev_writeseg()
156 static void erase_end_io(struct bio *bio, int err) erase_end_io() argument
158 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); erase_end_io()
159 struct super_block *sb = bio->bi_private; erase_end_io()
164 BUG_ON(bio->bi_vcnt == 0); erase_end_io()
165 bio_put(bio); erase_end_io()
174 struct bio *bio; do_erase() local
180 bio = bio_alloc(GFP_NOFS, max_pages); do_erase()
181 BUG_ON(!bio); do_erase()
186 bio->bi_vcnt = i; do_erase()
187 bio->bi_iter.bi_size = i * PAGE_SIZE; do_erase()
188 bio->bi_bdev = super->s_bdev; do_erase()
189 bio->bi_iter.bi_sector = ofs >> 9; do_erase()
190 bio->bi_private = sb; do_erase()
191 bio->bi_end_io = erase_end_io; do_erase()
193 submit_bio(WRITE, bio); do_erase()
200 bio = bio_alloc(GFP_NOFS, max_pages); do_erase()
201 BUG_ON(!bio); do_erase()
203 bio->bi_io_vec[i].bv_page = super->s_erase_page; do_erase()
204 bio->bi_io_vec[i].bv_len = PAGE_SIZE; do_erase()
205 bio->bi_io_vec[i].bv_offset = 0; do_erase()
207 bio->bi_vcnt = nr_pages; do_erase()
208 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; do_erase()
209 bio->bi_bdev = super->s_bdev; do_erase()
210 bio->bi_iter.bi_sector = ofs >> 9; do_erase()
211 bio->bi_private = sb; do_erase()
212 bio->bi_end_io = erase_end_io; do_erase()
214 submit_bio(WRITE, bio); do_erase()
/linux-4.1.27/include/trace/events/
H A Dblock.h139 * the @rq->bio is %NULL, then there is absolutely no additional work to
140 * do for the request. If @rq->bio is non-NULL then there is
248 * @bio: block operation
250 * A bounce buffer was used to handle the block operation @bio in @q.
252 * data between the @bio data memory area and the IO device. Use of a
258 TP_PROTO(struct request_queue *q, struct bio *bio),
260 TP_ARGS(q, bio),
271 __entry->dev = bio->bi_bdev ?
272 bio->bi_bdev->bd_dev : 0;
273 __entry->sector = bio->bi_iter.bi_sector;
274 __entry->nr_sector = bio_sectors(bio);
275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
288 * @bio: block operation completed
292 * block IO operation @bio.
296 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
298 TP_ARGS(q, bio, error),
309 __entry->dev = bio->bi_bdev->bd_dev;
310 __entry->sector = bio->bi_iter.bi_sector;
311 __entry->nr_sector = bio_sectors(bio);
313 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
324 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
326 TP_ARGS(q, rq, bio),
337 __entry->dev = bio->bi_bdev->bd_dev;
338 __entry->sector = bio->bi_iter.bi_sector;
339 __entry->nr_sector = bio_sectors(bio);
340 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
353 * @rq: request bio is being merged into
354 * @bio: new block operation to merge
356 * Merging block request @bio to the end of an existing block request
361 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
363 TP_ARGS(q, rq, bio)
369 * @rq: request bio is being merged into
370 * @bio: new block operation to merge
372 * Merging block IO operation @bio to the beginning of an existing block
377 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
379 TP_ARGS(q, rq, bio)
385 * @bio: new block operation
387 * About to place the block IO operation @bio into queue @q.
391 TP_PROTO(struct request_queue *q, struct bio *bio),
393 TP_ARGS(q, bio),
404 __entry->dev = bio->bi_bdev->bd_dev;
405 __entry->sector = bio->bi_iter.bi_sector;
406 __entry->nr_sector = bio_sectors(bio);
407 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
419 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
421 TP_ARGS(q, bio, rw),
432 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
433 __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
434 __entry->nr_sector = bio ? bio_sectors(bio) : 0;
436 bio ? bio->bi_rw : 0, __entry->nr_sector);
449 * @bio: pending block IO operation
453 * block IO operation @bio.
457 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
459 TP_ARGS(q, bio, rw)
465 * @bio: pending block IO operation
475 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
477 TP_ARGS(q, bio, rw)
541 * block_split - split a single bio struct into two bio structs
542 * @q: queue containing the bio
543 * @bio: block operation being split
544 * @new_sector: The starting sector for the new bio
546 * The bio request @bio in request queue @q needs to be split into two
547 * bio requests. The newly created @bio request starts at
553 TP_PROTO(struct request_queue *q, struct bio *bio,
556 TP_ARGS(q, bio, new_sector),
567 __entry->dev = bio->bi_bdev->bd_dev;
568 __entry->sector = bio->bi_iter.bi_sector;
570 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
584 * @bio: revised operation
593 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
596 TP_ARGS(q, bio, dev, from),
608 __entry->dev = bio->bi_bdev->bd_dev;
609 __entry->sector = bio->bi_iter.bi_sector;
610 __entry->nr_sector = bio_sectors(bio);
613 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
H A Dbcache.h10 TP_PROTO(struct bcache_device *d, struct bio *bio),
11 TP_ARGS(d, bio),
24 __entry->dev = bio->bi_bdev->bd_dev;
27 __entry->sector = bio->bi_iter.bi_sector;
28 __entry->orig_sector = bio->bi_iter.bi_sector - 16;
29 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
30 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
80 TP_PROTO(struct bcache_device *d, struct bio *bio),
81 TP_ARGS(d, bio)
85 TP_PROTO(struct bcache_device *d, struct bio *bio),
86 TP_ARGS(d, bio)
90 TP_PROTO(struct bio *bio),
91 TP_ARGS(bio),
101 __entry->dev = bio->bi_bdev->bd_dev;
102 __entry->sector = bio->bi_iter.bi_sector;
103 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
104 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
113 TP_PROTO(struct bio *bio),
114 TP_ARGS(bio)
118 TP_PROTO(struct bio *bio),
119 TP_ARGS(bio)
123 TP_PROTO(struct bio *bio, bool hit, bool bypass),
124 TP_ARGS(bio, hit, bypass),
136 __entry->dev = bio->bi_bdev->bd_dev;
137 __entry->sector = bio->bi_iter.bi_sector;
138 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
139 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
151 TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
153 TP_ARGS(c, inode, bio, writeback, bypass),
168 __entry->sector = bio->bi_iter.bi_sector;
169 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
170 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
182 TP_PROTO(struct bio *bio),
183 TP_ARGS(bio)
224 TP_PROTO(struct bio *bio),
225 TP_ARGS(bio)
H A Df2fs.h741 struct bio *bio),
743 TP_ARGS(sb, fio, bio),
757 __entry->sector = bio->bi_iter.bi_sector;
758 __entry->size = bio->bi_iter.bi_size;
772 struct bio *bio),
774 TP_ARGS(sb, fio, bio),
776 TP_CONDITION(bio)
782 struct bio *bio),
784 TP_ARGS(sb, fio, bio),
786 TP_CONDITION(bio)
/linux-4.1.27/fs/ext4/
H A Dreadpage.c35 #include <linux/bio.h>
58 struct bio *bio = ctx->bio; completion_pages() local
62 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
74 bio_put(bio);
80 static inline bool ext4_bio_encrypted(struct bio *bio) ext4_bio_encrypted() argument
83 return unlikely(bio->bi_private != NULL); ext4_bio_encrypted()
101 static void mpage_end_io(struct bio *bio, int err) mpage_end_io() argument
106 if (ext4_bio_encrypted(bio)) { mpage_end_io()
107 struct ext4_crypto_ctx *ctx = bio->bi_private; mpage_end_io()
113 ctx->bio = bio; mpage_end_io()
118 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
130 bio_put(bio);
137 struct bio *bio = NULL; ext4_mpage_readpages() local
272 if (bio && (last_block_in_bio != blocks[0] - 1)) { ext4_mpage_readpages()
274 submit_bio(READ, bio); ext4_mpage_readpages()
275 bio = NULL; ext4_mpage_readpages()
277 if (bio == NULL) { ext4_mpage_readpages()
286 bio = bio_alloc(GFP_KERNEL, ext4_mpage_readpages()
288 if (!bio) { ext4_mpage_readpages()
293 bio->bi_bdev = bdev; ext4_mpage_readpages()
294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); ext4_mpage_readpages()
295 bio->bi_end_io = mpage_end_io; ext4_mpage_readpages()
296 bio->bi_private = ctx; ext4_mpage_readpages()
300 if (bio_add_page(bio, page, length, 0) < length) ext4_mpage_readpages()
306 submit_bio(READ, bio); ext4_mpage_readpages()
307 bio = NULL; ext4_mpage_readpages()
312 if (bio) { ext4_mpage_readpages()
313 submit_bio(READ, bio); ext4_mpage_readpages()
314 bio = NULL; ext4_mpage_readpages()
325 if (bio) ext4_mpage_readpages()
326 submit_bio(READ, bio); ext4_mpage_readpages()
H A Dpage-io.c21 #include <linux/bio.h>
61 static void ext4_finish_bio(struct bio *bio) ext4_finish_bio() argument
64 int error = !test_bit(BIO_UPTODATE, &bio->bi_flags); ext4_finish_bio()
67 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
127 struct bio *bio, *next_bio; ext4_release_io_end() local
136 for (bio = io_end->bio; bio; bio = next_bio) { ext4_release_io_end()
137 next_bio = bio->bi_private; ext4_release_io_end()
138 ext4_finish_bio(bio); ext4_release_io_end()
139 bio_put(bio); ext4_release_io_end()
313 static void ext4_end_bio(struct bio *bio, int error) ext4_end_bio() argument
315 ext4_io_end_t *io_end = bio->bi_private; ext4_end_bio()
316 sector_t bi_sector = bio->bi_iter.bi_sector; ext4_end_bio()
319 bio->bi_end_io = NULL; ext4_end_bio()
320 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) ext4_end_bio()
338 * Link bio into list hanging from io_end. We have to do it ext4_end_bio()
339 * atomically as bio completions can be racing against each ext4_end_bio()
342 bio->bi_private = xchg(&io_end->bio, bio); ext4_end_bio()
347 * we finish the bio. ext4_end_bio()
350 ext4_finish_bio(bio); ext4_end_bio()
351 bio_put(bio); ext4_end_bio()
357 struct bio *bio = io->io_bio; ext4_io_submit() local
359 if (bio) { ext4_io_submit()
380 struct bio *bio; io_submit_init_bio() local
382 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); io_submit_init_bio()
383 if (!bio) io_submit_init_bio()
385 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); io_submit_init_bio()
386 bio->bi_bdev = bh->b_bdev; io_submit_init_bio()
387 bio->bi_end_io = ext4_end_bio; io_submit_init_bio()
388 bio->bi_private = ext4_get_io_end(io->io_end); io_submit_init_bio()
389 io->io_bio = bio; io_submit_init_bio()
H A Dext4_crypto.h82 struct bio *bio; /* The bio for this context */ member in struct:ext4_crypto_ctx
H A Dcrypto.c484 struct bio *bio; ext4_encrypted_zeroout() local
518 bio = bio_alloc(GFP_KERNEL, 1); ext4_encrypted_zeroout()
519 if (!bio) { ext4_encrypted_zeroout()
523 bio->bi_bdev = inode->i_sb->s_bdev; ext4_encrypted_zeroout()
524 bio->bi_iter.bi_sector = pblk; ext4_encrypted_zeroout()
525 err = bio_add_page(bio, ciphertext_page, ext4_encrypted_zeroout()
528 bio_put(bio); ext4_encrypted_zeroout()
531 err = submit_bio_wait(WRITE, bio); ext4_encrypted_zeroout()
/linux-4.1.27/mm/
H A Dpage_io.c18 #include <linux/bio.h>
27 static struct bio *get_swap_bio(gfp_t gfp_flags, get_swap_bio()
30 struct bio *bio; get_swap_bio() local
32 bio = bio_alloc(gfp_flags, 1); get_swap_bio()
33 if (bio) { get_swap_bio()
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); get_swap_bio()
35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; get_swap_bio()
36 bio->bi_io_vec[0].bv_page = page; get_swap_bio()
37 bio->bi_io_vec[0].bv_len = PAGE_SIZE; get_swap_bio()
38 bio->bi_io_vec[0].bv_offset = 0; get_swap_bio()
39 bio->bi_vcnt = 1; get_swap_bio()
40 bio->bi_iter.bi_size = PAGE_SIZE; get_swap_bio()
41 bio->bi_end_io = end_io; get_swap_bio()
43 return bio; get_swap_bio()
46 void end_swap_bio_write(struct bio *bio, int err) end_swap_bio_write() argument
48 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_swap_bio_write()
49 struct page *page = bio->bi_io_vec[0].bv_page; end_swap_bio_write()
63 imajor(bio->bi_bdev->bd_inode), end_swap_bio_write()
64 iminor(bio->bi_bdev->bd_inode), end_swap_bio_write()
65 (unsigned long long)bio->bi_iter.bi_sector); end_swap_bio_write()
69 bio_put(bio); end_swap_bio_write()
72 void end_swap_bio_read(struct bio *bio, int err) end_swap_bio_read() argument
74 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_swap_bio_read()
75 struct page *page = bio->bi_io_vec[0].bv_page; end_swap_bio_read()
81 imajor(bio->bi_bdev->bd_inode), end_swap_bio_read()
82 iminor(bio->bi_bdev->bd_inode), end_swap_bio_read()
83 (unsigned long long)bio->bi_iter.bi_sector); end_swap_bio_read()
133 bio_put(bio); end_swap_bio_read()
257 void (*end_write_func)(struct bio *, int)) __swap_writepage()
259 struct bio *bio; __swap_writepage() local
292 * the normal direct-to-bio case as it could __swap_writepage()
311 bio = get_swap_bio(GFP_NOIO, page, end_write_func); __swap_writepage()
312 if (bio == NULL) { __swap_writepage()
323 submit_bio(rw, bio); __swap_writepage()
330 struct bio *bio; swap_readpage() local
359 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); swap_readpage()
360 if (bio == NULL) { swap_readpage()
366 submit_bio(READ, bio); swap_readpage()
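Note: get_swap_bio() above (source lines 27-43) builds a one-segment bio entirely by hand, filling bi_io_vec[0], bi_vcnt and bi_iter.bi_size directly instead of going through bio_add_page(); the end_io handlers then find the page again via bi_io_vec[0].bv_page. A minimal sketch of that manual setup against the 4.1 structures; one_page_bio() is an illustrative name.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * One-segment bio set up by hand, in the style of get_swap_bio().
 */
static struct bio *one_page_bio(gfp_t gfp, struct block_device *bdev,
				sector_t sector, struct page *page,
				bio_end_io_t end_io)
{
	struct bio *bio = bio_alloc(gfp, 1);

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_iter.bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}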
/linux-4.1.27/fs/
H A Dmpage.c12 * use bio_add_page() to build bio's just the right size
20 #include <linux/bio.h>
45 static void mpage_end_io(struct bio *bio, int err) mpage_end_io() argument
50 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
52 page_endio(page, bio_data_dir(bio), err); bio_for_each_segment_all()
55 bio_put(bio);
58 static struct bio *mpage_bio_submit(int rw, struct bio *bio) mpage_bio_submit() argument
60 bio->bi_end_io = mpage_end_io; mpage_bio_submit()
61 guard_bio_eod(rw, bio); mpage_bio_submit()
62 submit_bio(rw, bio); mpage_bio_submit()
66 static struct bio * mpage_alloc()
71 struct bio *bio; mpage_alloc() local
73 bio = bio_alloc(gfp_flags, nr_vecs); mpage_alloc()
75 if (bio == NULL && (current->flags & PF_MEMALLOC)) { mpage_alloc()
76 while (!bio && (nr_vecs /= 2)) mpage_alloc()
77 bio = bio_alloc(gfp_flags, nr_vecs); mpage_alloc()
80 if (bio) { mpage_alloc()
81 bio->bi_bdev = bdev; mpage_alloc()
82 bio->bi_iter.bi_sector = first_sector; mpage_alloc()
84 return bio; mpage_alloc()
139 static struct bio * do_mpage_readpage()
140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, do_mpage_readpage() argument
269 if (bio && (*last_block_in_bio != blocks[0] - 1)) do_mpage_readpage()
270 bio = mpage_bio_submit(READ, bio); do_mpage_readpage()
273 if (bio == NULL) { do_mpage_readpage()
279 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), do_mpage_readpage()
282 if (bio == NULL) do_mpage_readpage()
287 if (bio_add_page(bio, page, length, 0) < length) { do_mpage_readpage()
288 bio = mpage_bio_submit(READ, bio); do_mpage_readpage()
296 bio = mpage_bio_submit(READ, bio); do_mpage_readpage()
300 return bio; do_mpage_readpage()
303 if (bio) do_mpage_readpage()
304 bio = mpage_bio_submit(READ, bio); do_mpage_readpage()
359 struct bio *bio = NULL; mpage_readpages() local
374 bio = do_mpage_readpage(bio, page, mpage_readpages()
383 if (bio) mpage_readpages()
384 mpage_bio_submit(READ, bio); mpage_readpages()
394 struct bio *bio = NULL; mpage_readpage() local
401 bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, mpage_readpage()
403 if (bio) mpage_readpage()
404 mpage_bio_submit(READ, bio); mpage_readpage()
427 struct bio *bio; member in struct:mpage_data
466 struct bio *bio = mpd->bio; __mpage_writepage() local
592 if (bio && mpd->last_block_in_bio != blocks[0] - 1) __mpage_writepage()
593 bio = mpage_bio_submit(WRITE, bio); __mpage_writepage()
596 if (bio == NULL) { __mpage_writepage()
604 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), __mpage_writepage()
606 if (bio == NULL) __mpage_writepage()
616 if (bio_add_page(bio, page, length, 0) < length) { __mpage_writepage()
617 bio = mpage_bio_submit(WRITE, bio); __mpage_writepage()
627 bio = mpage_bio_submit(WRITE, bio); __mpage_writepage()
638 if (bio) __mpage_writepage()
639 bio = mpage_bio_submit(WRITE, bio); __mpage_writepage()
652 mpd->bio = bio; __mpage_writepage()
688 .bio = NULL, mpage_writepages()
695 if (mpd.bio) mpage_writepages()
696 mpage_bio_submit(WRITE, mpd.bio); mpage_writepages()
707 .bio = NULL, mpage_writepage()
713 if (mpd.bio) mpage_writepage()
714 mpage_bio_submit(WRITE, mpd.bio); mpage_writepage()
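Note: mpage_alloc() above (source lines 66-84) shows the allocation fallback used when building readahead/writeback bios: if bio_alloc() fails while the task is in the memalloc path, keep retrying with half the vector count until something fits; the same halving retry appears in nfs blocklayout's bl_alloc_init_bio() further down in these results. A minimal sketch of just that helper under the 4.1 API; alloc_bio_retry() is an illustrative name and, unlike mpage_bio_submit(), it leaves bi_end_io for the caller to set.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

/*
 * bio allocation with the halving fallback used by mpage_alloc():
 * under PF_MEMALLOC, retry with progressively fewer vector entries.
 */
static struct bio *alloc_bio_retry(struct block_device *bdev,
				   sector_t first_sector, int nr_vecs,
				   gfp_t gfp_flags)
{
	struct bio *bio = bio_alloc(gfp_flags, nr_vecs);

	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}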
H A Ddirect-io.c31 #include <linux/bio.h>
62 struct bio *bio; /* bio under assembly */ member in struct:dio_submit
82 loff_t logical_offset_in_bio; /* current first logical block in bio */
83 sector_t final_block_in_bio; /* current final block in bio + 1 */
125 struct bio *bio_list; /* singly linked via bi_private */
230 * AIO submission can race with bio completion to get here while dio_complete()
231 * expecting to have the last io completed by bio completion. dio_complete()
283 static int dio_bio_complete(struct dio *dio, struct bio *bio);
288 static void dio_bio_end_aio(struct bio *bio, int error) dio_bio_end_aio() argument
290 struct dio *dio = bio->bi_private; dio_bio_end_aio()
294 /* cleanup the bio */ dio_bio_end_aio()
295 dio_bio_complete(dio, bio); dio_bio_end_aio()
321 static void dio_bio_end_io(struct bio *bio, int error) dio_bio_end_io() argument
323 struct dio *dio = bio->bi_private; dio_bio_end_io()
327 bio->bi_private = dio->bio_list; dio_bio_end_io()
328 dio->bio_list = bio; dio_bio_end_io()
335 * dio_end_io - handle the end io action for the given bio
336 * @bio: The direct io bio thats being completed
343 void dio_end_io(struct bio *bio, int error) dio_end_io() argument
345 struct dio *dio = bio->bi_private; dio_end_io()
348 dio_bio_end_aio(bio, error); dio_end_io()
350 dio_bio_end_io(bio, error); dio_end_io()
359 struct bio *bio; dio_bio_alloc() local
362 * bio_alloc() is guaranteed to return a bio when called with dio_bio_alloc()
365 bio = bio_alloc(GFP_KERNEL, nr_vecs); dio_bio_alloc()
367 bio->bi_bdev = bdev; dio_bio_alloc()
368 bio->bi_iter.bi_sector = first_sector; dio_bio_alloc()
370 bio->bi_end_io = dio_bio_end_aio; dio_bio_alloc()
372 bio->bi_end_io = dio_bio_end_io; dio_bio_alloc()
374 sdio->bio = bio; dio_bio_alloc()
387 struct bio *bio = sdio->bio; dio_bio_submit() local
390 bio->bi_private = dio; dio_bio_submit()
397 bio_set_pages_dirty(bio); dio_bio_submit()
400 sdio->submit_io(dio->rw, bio, dio->inode, dio_bio_submit()
403 submit_bio(dio->rw, bio); dio_bio_submit()
405 sdio->bio = NULL; dio_bio_submit()
425 static struct bio *dio_await_one(struct dio *dio) dio_await_one()
428 struct bio *bio = NULL; dio_await_one() local
433 * Wait as long as the list is empty and there are bios in flight. bio dio_await_one()
448 bio = dio->bio_list; dio_await_one()
449 dio->bio_list = bio->bi_private; dio_await_one()
452 return bio; dio_await_one()
458 static int dio_bio_complete(struct dio *dio, struct bio *bio) dio_bio_complete() argument
460 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); dio_bio_complete()
468 bio_check_pages_dirty(bio); /* transfers ownership */ dio_bio_complete()
470 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
477 bio_put(bio);
491 struct bio *bio; dio_await_completion() local
493 bio = dio_await_one(dio); dio_await_completion()
494 if (bio) dio_await_completion()
495 dio_bio_complete(dio, bio); dio_await_completion()
496 } while (bio); dio_await_completion()
513 struct bio *bio; dio_bio_reap() local
517 bio = dio->bio_list; dio_bio_reap()
518 dio->bio_list = bio->bi_private; dio_bio_reap()
520 ret2 = dio_bio_complete(dio, bio); dio_bio_reap()
644 * There is no bio. Make one now.
675 ret = bio_add_page(sdio->bio, sdio->cur_page, dio_bio_add_page()
698 * We take a ref against the page here (on behalf of its presence in the bio).
708 if (sdio->bio) { dio_send_cur_page()
711 sdio->bio->bi_iter.bi_size; dio_send_cur_page()
724 * be the next logical offset in the bio, submit the bio we dio_send_cur_page()
732 if (sdio->bio == NULL) { dio_send_cur_page()
764 * If that doesn't work out then we put the old page into the bio and add this
1270 if (sdio.bio) do_blockdev_direct_IO()
1292 * bio completion will call aio_complete. The only time it's safe to do_blockdev_direct_IO()
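Note: dio_bio_end_io() and dio_await_one() above implement a small completion queue without a bio_list: each finished bio is pushed onto a singly linked list threaded through bi_private under the dio's bio_lock spinlock, and the submitter pops entries off later. A minimal sketch of that push/pop pair with the 4.1 end_io signature; struct dio_like is an illustrative stand-in for struct dio and omits the refcounting and waiter wakeup the real code does.

#include <linux/bio.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for the struct dio fields used here. */
struct dio_like {
	spinlock_t	bio_lock;
	struct bio	*bio_list;	/* singly linked via bi_private */
};

/* Completion side: push the bio, reusing bi_private as the link field. */
static void dio_like_end_io(struct bio *bio, int error)
{
	struct dio_like *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	/* the real dio_bio_end_io() also wakes a sleeping waiter here */
}

/* Submitter side: pop one completed bio, or NULL if none are queued. */
static struct bio *dio_like_pop(struct dio_like *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio = dio->bio_list;
	if (bio)
		dio->bio_list = bio->bi_private;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}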
H A Dinternal.h41 extern void guard_bio_eod(int rw, struct bio *bio);
H A Dbuffer.c38 #include <linux/bio.h>
2937 static void end_bio_bh_io_sync(struct bio *bio, int err) end_bio_bh_io_sync() argument
2939 struct buffer_head *bh = bio->bi_private; end_bio_bh_io_sync()
2942 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); end_bio_bh_io_sync()
2945 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) end_bio_bh_io_sync()
2948 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); end_bio_bh_io_sync()
2949 bio_put(bio); end_bio_bh_io_sync()
2957 * We'll just truncate the bio to the size of the device,
2964 void guard_bio_eod(int rw, struct bio *bio) guard_bio_eod() argument
2967 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; guard_bio_eod()
2970 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; guard_bio_eod()
2979 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) guard_bio_eod()
2982 maxsector -= bio->bi_iter.bi_sector; guard_bio_eod()
2983 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) guard_bio_eod()
2986 /* Uhhuh. We've got a bio that straddles the device size! */ guard_bio_eod()
2987 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); guard_bio_eod()
2989 /* Truncate the bio.. */ guard_bio_eod()
2990 bio->bi_iter.bi_size -= truncated_bytes; guard_bio_eod()
3002 struct bio *bio; _submit_bh() local
3018 * from here on down, it's all bio -- do the initial mapping, _submit_bh()
3019 * submit_bio -> generic_make_request may further map this bio around _submit_bh()
3021 bio = bio_alloc(GFP_NOIO, 1); _submit_bh()
3023 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); _submit_bh()
3024 bio->bi_bdev = bh->b_bdev; _submit_bh()
3025 bio->bi_io_vec[0].bv_page = bh->b_page; _submit_bh()
3026 bio->bi_io_vec[0].bv_len = bh->b_size; _submit_bh()
3027 bio->bi_io_vec[0].bv_offset = bh_offset(bh); _submit_bh()
3029 bio->bi_vcnt = 1; _submit_bh()
3030 bio->bi_iter.bi_size = bh->b_size; _submit_bh()
3032 bio->bi_end_io = end_bio_bh_io_sync; _submit_bh()
3033 bio->bi_private = bh; _submit_bh()
3034 bio->bi_flags |= bio_flags; _submit_bh()
3037 guard_bio_eod(rw, bio); _submit_bh()
3044 bio_get(bio); _submit_bh()
3045 submit_bio(rw, bio); _submit_bh()
3047 if (bio_flagged(bio, BIO_EOPNOTSUPP)) _submit_bh()
3050 bio_put(bio); _submit_bh()
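Note: guard_bio_eod() above (source lines 2964-2990) clips a buffer-head bio that would run past the end of the device: it derives the device size in 512-byte sectors from bd_inode, subtracts the bio's starting sector, and shaves the overhanging bytes off bi_iter.bi_size and the last bio_vec. For example, a 4096-byte bio starting 4 sectors before the end of the device is shortened by 4096 - (4 << 9) = 2048 bytes. A sketch of just that arithmetic with the 4.1 fields; clip_bio_to_device() is an illustrative name.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/*
 * End-of-device clipping in the style of guard_bio_eod().
 */
static void clip_bio_to_device(struct bio *bio)
{
	sector_t maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned truncated_bytes;

	if (!maxsector || bio->bi_iter.bi_sector >= maxsector)
		return;					/* nothing to clip */

	maxsector -= bio->bi_iter.bi_sector;		/* sectors left on the device */
	if ((bio->bi_iter.bi_size >> 9) <= maxsector)
		return;					/* the bio fits entirely */

	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
	bio->bi_iter.bi_size -= truncated_bytes;	/* shrink the bio ... */
	last->bv_len -= truncated_bytes;		/* ... and its last segment */
}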
/linux-4.1.27/fs/nfs/blocklayout/
H A Dblocklayout.c37 #include <linux/bio.h> /* struct bio */
104 static struct bio * bl_submit_bio()
105 bl_submit_bio(int rw, struct bio *bio) bl_submit_bio() argument
107 if (bio) { bl_submit_bio()
108 get_parallel(bio->bi_private); bl_submit_bio()
109 dprintk("%s submitting %s bio %u@%llu\n", __func__, bl_submit_bio()
110 rw == READ ? "read" : "write", bio->bi_iter.bi_size, bl_submit_bio()
111 (unsigned long long)bio->bi_iter.bi_sector); bl_submit_bio()
112 submit_bio(rw, bio); bl_submit_bio()
117 static struct bio * bl_alloc_init_bio()
119 void (*end_io)(struct bio *, int err), struct parallel_io *par) bl_alloc_init_bio()
121 struct bio *bio; bl_alloc_init_bio() local
124 bio = bio_alloc(GFP_NOIO, npg); bl_alloc_init_bio()
125 if (!bio && (current->flags & PF_MEMALLOC)) { bl_alloc_init_bio()
126 while (!bio && (npg /= 2)) bl_alloc_init_bio()
127 bio = bio_alloc(GFP_NOIO, npg); bl_alloc_init_bio()
130 if (bio) { bl_alloc_init_bio()
131 bio->bi_iter.bi_sector = disk_sector; bl_alloc_init_bio()
132 bio->bi_bdev = bdev; bl_alloc_init_bio()
133 bio->bi_end_io = end_io; bl_alloc_init_bio()
134 bio->bi_private = par; bl_alloc_init_bio()
136 return bio; bl_alloc_init_bio()
139 static struct bio * do_add_page_to_bio()
140 do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, do_add_page_to_bio() argument
143 void (*end_io)(struct bio *, int err), do_add_page_to_bio()
162 bio = bl_submit_bio(rw, bio); do_add_page_to_bio()
173 if (!bio) { do_add_page_to_bio()
174 bio = bl_alloc_init_bio(npg, map->bdev, do_add_page_to_bio()
176 if (!bio) do_add_page_to_bio()
179 if (bio_add_page(bio, page, *len, offset) < *len) { do_add_page_to_bio()
180 bio = bl_submit_bio(rw, bio); do_add_page_to_bio()
183 return bio; do_add_page_to_bio()
186 static void bl_end_io_read(struct bio *bio, int err) bl_end_io_read() argument
188 struct parallel_io *par = bio->bi_private; bl_end_io_read()
198 bio_put(bio); bl_end_io_read()
227 struct bio *bio = NULL; bl_read_pagelist() local
256 bio = bl_submit_bio(READ, bio); bl_read_pagelist()
281 bio = bl_submit_bio(READ, bio); bl_read_pagelist()
289 bio = do_add_page_to_bio(bio, bl_read_pagelist()
295 if (IS_ERR(bio)) { bl_read_pagelist()
296 header->pnfs_error = PTR_ERR(bio); bl_read_pagelist()
297 bio = NULL; bl_read_pagelist()
313 bl_submit_bio(READ, bio); bl_read_pagelist()
319 static void bl_end_io_write(struct bio *bio, int err) bl_end_io_write() argument
321 struct parallel_io *par = bio->bi_private; bl_end_io_write()
322 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); bl_end_io_write()
330 bio_put(bio); bl_end_io_write()
374 struct bio *bio = NULL; bl_write_pagelist() local
406 bio = bl_submit_bio(WRITE, bio); bl_write_pagelist()
417 bio = do_add_page_to_bio(bio, header->page_array.npages - i, bl_write_pagelist()
421 if (IS_ERR(bio)) { bl_write_pagelist()
422 header->pnfs_error = PTR_ERR(bio); bl_write_pagelist()
423 bio = NULL; bl_write_pagelist()
435 bl_submit_bio(WRITE, bio); bl_write_pagelist()
/linux-4.1.27/drivers/target/
H A Dtarget_core_iblock.c34 #include <linux/bio.h>
309 static void iblock_bio_done(struct bio *bio, int err) iblock_bio_done() argument
311 struct se_cmd *cmd = bio->bi_private; iblock_bio_done()
317 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) iblock_bio_done()
321 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," iblock_bio_done()
322 " err: %d\n", bio, err); iblock_bio_done()
324 * Bump the ib_bio_err_cnt and release bio. iblock_bio_done()
330 bio_put(bio); iblock_bio_done()
335 static struct bio * iblock_get_bio()
339 struct bio *bio; iblock_get_bio() local
342 * Only allocate as many vector entries as the bio code allows us to, iblock_get_bio()
348 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); iblock_get_bio()
349 if (!bio) { iblock_get_bio()
350 pr_err("Unable to allocate memory for bio\n"); iblock_get_bio()
354 bio->bi_bdev = ib_dev->ibd_bd; iblock_get_bio()
355 bio->bi_private = cmd; iblock_get_bio()
356 bio->bi_end_io = &iblock_bio_done; iblock_get_bio()
357 bio->bi_iter.bi_sector = lba; iblock_get_bio()
359 return bio; iblock_get_bio()
365 struct bio *bio; iblock_submit_bios() local
368 while ((bio = bio_list_pop(list))) iblock_submit_bios()
369 submit_bio(rw, bio); iblock_submit_bios()
373 static void iblock_end_io_flush(struct bio *bio, int err) iblock_end_io_flush() argument
375 struct se_cmd *cmd = bio->bi_private; iblock_end_io_flush()
387 bio_put(bio); iblock_end_io_flush()
399 struct bio *bio; iblock_execute_sync_cache() local
408 bio = bio_alloc(GFP_KERNEL, 0); iblock_execute_sync_cache()
409 bio->bi_end_io = iblock_end_io_flush; iblock_execute_sync_cache()
410 bio->bi_bdev = ib_dev->ibd_bd; iblock_execute_sync_cache()
412 bio->bi_private = cmd; iblock_execute_sync_cache()
413 submit_bio(WRITE_FLUSH, bio); iblock_execute_sync_cache()
462 struct bio *bio; iblock_execute_write_same() local
487 bio = iblock_get_bio(cmd, block_lba, 1); iblock_execute_write_same()
488 if (!bio) iblock_execute_write_same()
492 bio_list_add(&list, bio); iblock_execute_write_same()
497 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) iblock_execute_write_same()
500 bio = iblock_get_bio(cmd, block_lba, 1); iblock_execute_write_same()
501 if (!bio) iblock_execute_write_same()
505 bio_list_add(&list, bio); iblock_execute_write_same()
517 while ((bio = bio_list_pop(&list))) iblock_execute_write_same()
518 bio_put(bio); iblock_execute_write_same()
630 iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) iblock_alloc_bip() argument
645 bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); iblock_alloc_bip()
653 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; iblock_alloc_bip()
660 rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, iblock_alloc_bip()
667 pr_debug("Added bio integrity page: %p length: %d offset; %d\n", iblock_alloc_bip()
680 struct bio *bio, *bio_start; iblock_execute_rw() local
739 bio = iblock_get_bio(cmd, block_lba, sgl_nents); iblock_execute_rw()
740 if (!bio) iblock_execute_rw()
743 bio_start = bio; iblock_execute_rw()
745 bio_list_add(&list, bio); iblock_execute_rw()
756 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) for_each_sg()
763 bio = iblock_get_bio(cmd, block_lba, sg_num); for_each_sg()
764 if (!bio) for_each_sg()
768 bio_list_add(&list, bio); for_each_sg()
788 while ((bio = bio_list_pop(&list)))
789 bio_put(bio);
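Note: iblock_execute_rw() above walks a scatterlist with for_each_sg(), retrying bio_add_page() with a fresh bio whenever the current one refuses a segment, collects everything on a struct bio_list, and only then pops and submits (iblock_submit_bios(), source lines 365-369). A compressed sketch of that collect-then-submit shape under the 4.1 API; sg_to_bios() is an illustrative helper, plain bio_alloc() stands in for the driver's bioset-backed iblock_get_bio(), and error handling is reduced to putting the queued bios.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static int sg_to_bios(struct block_device *bdev, sector_t lba,
		      struct scatterlist *sgl, int sgl_nents, int rw)
{
	struct bio_list list;
	struct bio *bio;
	struct scatterlist *sg;
	int i;

	bio_list_init(&list);

	bio = bio_alloc(GFP_NOIO, min_t(int, sgl_nents, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	/* real code also sets bi_end_io/bi_private here, see iblock_get_bio() */
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = lba;
	bio_list_add(&list, bio);

	for_each_sg(sgl, sg, sgl_nents, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
		       != sg->length) {
			/* current bio is full: start another one at the next LBA */
			lba += bio_sectors(bio);
			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
			if (!bio)
				goto fail;
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = lba;
			bio_list_add(&list, bio);
		}
	}

	while ((bio = bio_list_pop(&list)))
		submit_bio(rw, bio);
	return 0;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	return -ENOMEM;
}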
H A Dtarget_core_pscsi.c850 static void pscsi_bi_endio(struct bio *bio, int error) pscsi_bi_endio() argument
852 bio_put(bio); pscsi_bi_endio()
855 static inline struct bio *pscsi_get_bio(int nr_vecs) pscsi_get_bio()
857 struct bio *bio; pscsi_get_bio() local
859 * Use bio_malloc() following the comment in for bio -> struct request pscsi_get_bio()
862 bio = bio_kmalloc(GFP_KERNEL, nr_vecs); pscsi_get_bio()
863 if (!bio) { pscsi_get_bio()
867 bio->bi_end_io = pscsi_bi_endio; pscsi_get_bio()
869 return bio; pscsi_get_bio()
874 enum dma_data_direction data_direction, struct bio **hbio) pscsi_map_sg()
877 struct bio *bio = NULL, *tbio = NULL; pscsi_map_sg() local
909 if (!bio) { for_each_sg()
913 * Calls bio_kmalloc() and sets bio->bi_end_io() for_each_sg()
915 bio = pscsi_get_bio(nr_vecs); for_each_sg()
916 if (!bio) for_each_sg()
920 bio->bi_rw |= REQ_WRITE; for_each_sg()
922 pr_debug("PSCSI: Allocated bio: %p," for_each_sg()
923 " dir: %s nr_vecs: %d\n", bio, for_each_sg()
932 *hbio = tbio = bio; for_each_sg()
934 tbio = tbio->bi_next = bio; for_each_sg()
938 " bio: %p page: %p len: %d off: %d\n", i, bio, for_each_sg()
942 bio, page, bytes, off); for_each_sg()
946 pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", for_each_sg()
947 bio->bi_vcnt, nr_vecs); for_each_sg()
949 if (bio->bi_vcnt > nr_vecs) { for_each_sg()
950 pr_debug("PSCSI: Reached bio->bi_vcnt max:" for_each_sg()
951 " %d i: %d bio: %p, allocating another" for_each_sg()
952 " bio\n", bio->bi_vcnt, i, bio); for_each_sg()
954 * Clear the pointer so that another bio will for_each_sg()
956 * current bio has already been set *tbio and for_each_sg()
957 * bio->bi_next. for_each_sg()
959 bio = NULL; for_each_sg()
969 bio = *hbio;
971 bio_endio(bio, 0); /* XXX: should be error */
994 struct bio *hbio; pscsi_execute_cmd()
1057 struct bio *bio = hbio; pscsi_execute_cmd() local
1059 bio_endio(bio, 0); /* XXX: should be error */ pscsi_execute_cmd()
/linux-4.1.27/drivers/block/rsxx/
H A Ddev.c34 #include <linux/bio.h>
46 * If you see a "bio too big" error in the log you will need to raise this
59 struct bio *bio; member in struct:rsxx_bio_meta
113 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) disk_stats_start() argument
115 generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), disk_stats_start()
120 struct bio *bio, disk_stats_complete()
123 generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, disk_stats_complete()
138 disk_stats_complete(card, meta->bio, meta->start_time); bio_dma_done_cb()
140 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); bio_dma_done_cb()
145 static void rsxx_make_request(struct request_queue *q, struct bio *bio) rsxx_make_request() argument
156 if (bio_end_sector(bio) > get_capacity(card->gendisk)) rsxx_make_request()
169 if (bio->bi_iter.bi_size == 0) { rsxx_make_request()
180 bio_meta->bio = bio; rsxx_make_request()
186 disk_stats_start(card, bio); rsxx_make_request()
189 bio_data_dir(bio) ? 'W' : 'R', bio_meta, rsxx_make_request()
190 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); rsxx_make_request()
192 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, rsxx_make_request()
202 bio_endio(bio, st); rsxx_make_request()
119 disk_stats_complete(struct rsxx_cardinfo *card, struct bio *bio, unsigned long start_time) disk_stats_complete() argument
H A Drsxx_priv.h38 #include <linux/bio.h>
395 struct bio *bio,
H A Ddma.c681 struct bio *bio, rsxx_dma_queue_bio()
700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ rsxx_dma_queue_bio()
708 if (bio->bi_rw & REQ_DISCARD) { rsxx_dma_queue_bio()
709 bv_len = bio->bi_iter.bi_size; rsxx_dma_queue_bio()
726 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
738 bio_data_dir(bio), bio_for_each_segment()
680 rsxx_dma_queue_bio(struct rsxx_cardinfo *card, struct bio *bio, atomic_t *n_dmas, rsxx_dma_cb cb, void *cb_data) rsxx_dma_queue_bio() argument
/linux-4.1.27/fs/btrfs/
H A Dcheck-integrity.h24 void btrfsic_submit_bio(int rw, struct bio *bio);
25 int btrfsic_submit_bio_wait(int rw, struct bio *bio);
H A Draid56.h45 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
48 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
52 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
H A Draid56.c21 #include <linux/bio.h>
91 * bio list and bio_list_lock are used
108 * merge with this bio
185 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
369 * hold the lock for the bio list because we need __remove_rbio_from_cache()
370 * to make sure the bio list is empty __remove_rbio_from_cache()
379 /* if the bio list isn't empty, this rbio is __remove_rbio_from_cache()
529 * returns true if the bio list inside this rbio
531 * Must be called with the bio list lock held, or
598 * bio list here, anyone else that wants to rbio_can_merge()
856 struct bio *cur = bio_list_get(&rbio->bio_list); rbio_orig_end_io()
857 struct bio *next; rbio_orig_end_io()
878 static void raid_write_end_io(struct bio *bio, int err) raid_write_end_io() argument
880 struct btrfs_raid_bio *rbio = bio->bi_private; raid_write_end_io()
883 fail_bio_stripe(rbio, bio); raid_write_end_io()
885 bio_put(bio); raid_write_end_io()
901 * the read/modify/write code wants to use the original bio for
904 * and page number in that stripe fall inside the original bio
1001 /* allocate pages for all the stripes in the bio, including parity */ alloc_rbio_pages()
1050 struct bio *last = bio_list->tail; rbio_add_io_page()
1053 struct bio *bio; rbio_add_io_page() local
1064 /* see if we can add this page onto our existing bio */ rbio_add_io_page()
1082 /* put a new bio on the list */ rbio_add_io_page()
1083 bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); rbio_add_io_page()
1084 if (!bio) rbio_add_io_page()
1087 bio->bi_iter.bi_size = 0; rbio_add_io_page()
1088 bio->bi_bdev = stripe->dev->bdev; rbio_add_io_page()
1089 bio->bi_iter.bi_sector = disk_start >> 9; rbio_add_io_page()
1090 set_bit(BIO_UPTODATE, &bio->bi_flags); rbio_add_io_page()
1092 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); rbio_add_io_page()
1093 bio_list_add(bio_list, bio); rbio_add_io_page()
1127 * helper function to walk our bio list and populate the bio_pages array with
1129 * searching through the bio list as we setup the IO in finish_rmw or stripe
1136 struct bio *bio; index_rbio_pages() local
1144 bio_list_for_each(bio, &rbio->bio_list) { index_rbio_pages()
1145 start = (u64)bio->bi_iter.bi_sector << 9; index_rbio_pages()
1149 for (i = 0; i < bio->bi_vcnt; i++) { index_rbio_pages()
1150 p = bio->bi_io_vec[i].bv_page; index_rbio_pages()
1176 struct bio *bio; finish_rmw() local
1196 * bio list here, anyone else that wants to finish_rmw()
1207 * bio list one last time and map the page pointers finish_rmw()
1212 * hopefully they will send another full bio. finish_rmw()
1309 bio = bio_list_pop(&bio_list); finish_rmw()
1310 if (!bio) finish_rmw()
1313 bio->bi_private = rbio; finish_rmw()
1314 bio->bi_end_io = raid_write_end_io; finish_rmw()
1315 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); finish_rmw()
1316 submit_bio(WRITE, bio); finish_rmw()
1325 * helper to find the stripe number for a given bio. Used to figure out which
1326 * stripe has failed. This expects the bio to correspond to a physical disk,
1330 struct bio *bio) find_bio_stripe()
1332 u64 physical = bio->bi_iter.bi_sector; find_bio_stripe()
1344 bio->bi_bdev == stripe->dev->bdev) { find_bio_stripe()
1353 * bio (before mapping). Used to figure out which stripe has
1357 struct bio *bio) find_logical_bio_stripe()
1359 u64 logical = bio->bi_iter.bi_sector; find_logical_bio_stripe()
1408 * bio.
1411 struct bio *bio) fail_bio_stripe()
1413 int failed = find_bio_stripe(rbio, bio); fail_bio_stripe()
1422 * this sets each page in the bio uptodate. It should only be used on private
1425 static void set_bio_pages_uptodate(struct bio *bio) set_bio_pages_uptodate() argument
1430 for (i = 0; i < bio->bi_vcnt; i++) { set_bio_pages_uptodate()
1431 p = bio->bi_io_vec[i].bv_page; set_bio_pages_uptodate()
1444 static void raid_rmw_end_io(struct bio *bio, int err) raid_rmw_end_io() argument
1446 struct btrfs_raid_bio *rbio = bio->bi_private; raid_rmw_end_io()
1449 fail_bio_stripe(rbio, bio); raid_rmw_end_io()
1451 set_bio_pages_uptodate(bio); raid_rmw_end_io()
1453 bio_put(bio); raid_rmw_end_io()
1505 struct bio *bio; raid56_rmw_stripe() local
1526 * page_in_rbio finds a page in the bio list raid56_rmw_stripe()
1535 * the bio cache may have handed us an uptodate raid56_rmw_stripe()
1560 * the bbio may be freed once we submit the last bio. Make sure raid56_rmw_stripe()
1565 bio = bio_list_pop(&bio_list); raid56_rmw_stripe()
1566 if (!bio) raid56_rmw_stripe()
1569 bio->bi_private = rbio; raid56_rmw_stripe()
1570 bio->bi_end_io = raid_rmw_end_io; raid56_rmw_stripe()
1572 btrfs_bio_wq_end_io(rbio->fs_info, bio, raid56_rmw_stripe()
1575 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); raid56_rmw_stripe()
1576 submit_bio(READ, bio); raid56_rmw_stripe()
1739 int raid56_parity_write(struct btrfs_root *root, struct bio *bio, raid56_parity_write() argument
1752 bio_list_add(&rbio->bio_list, bio); raid56_parity_write()
1753 rbio->bio_list_bytes = bio->bi_iter.bi_size; raid56_parity_write()
1835 * pages from the bio list __raid_recover_end_io()
1944 * pages from the bio list __raid_recover_end_io()
1987 static void raid_recover_end_io(struct bio *bio, int err) raid_recover_end_io() argument
1989 struct btrfs_raid_bio *rbio = bio->bi_private; raid_recover_end_io()
1996 fail_bio_stripe(rbio, bio); raid_recover_end_io()
1998 set_bio_pages_uptodate(bio); raid_recover_end_io()
1999 bio_put(bio); raid_recover_end_io()
2026 struct bio *bio; __raid56_parity_recover() local
2082 * the bbio may be freed once we submit the last bio. Make sure __raid56_parity_recover()
2087 bio = bio_list_pop(&bio_list); __raid56_parity_recover()
2088 if (!bio) __raid56_parity_recover()
2091 bio->bi_private = rbio; __raid56_parity_recover()
2092 bio->bi_end_io = raid_recover_end_io; __raid56_parity_recover()
2094 btrfs_bio_wq_end_io(rbio->fs_info, bio, __raid56_parity_recover()
2097 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); __raid56_parity_recover()
2098 submit_bio(READ, bio); __raid56_parity_recover()
2112 * so we assume the bio they send down corresponds to a failed part
2115 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, raid56_parity_recover() argument
2130 bio_list_add(&rbio->bio_list, bio); raid56_parity_recover()
2131 rbio->bio_list_bytes = bio->bi_iter.bi_size; raid56_parity_recover()
2133 rbio->faila = find_logical_bio_stripe(rbio, bio); raid56_parity_recover()
2159 * __raid56_parity_recover will end the bio with raid56_parity_recover()
2196 * raid bio are correct and not be changed during the scrub/replace. That
2201 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, raid56_parity_alloc_scrub_rbio() argument
2212 bio_list_add(&rbio->bio_list, bio); raid56_parity_alloc_scrub_rbio()
2214 * This is a special bio which is used to hold the completion handler raid56_parity_alloc_scrub_rbio()
2217 ASSERT(!bio->bi_iter.bi_size); raid56_parity_alloc_scrub_rbio()
2280 static void raid_write_parity_end_io(struct bio *bio, int err) raid_write_parity_end_io() argument
2282 struct btrfs_raid_bio *rbio = bio->bi_private; raid_write_parity_end_io()
2285 fail_bio_stripe(rbio, bio); raid_write_parity_end_io()
2287 bio_put(bio); raid_write_parity_end_io()
2314 struct bio *bio; finish_parity_scrub() local
2447 bio = bio_list_pop(&bio_list); finish_parity_scrub()
2448 if (!bio) finish_parity_scrub()
2451 bio->bi_private = rbio; finish_parity_scrub()
2452 bio->bi_end_io = raid_write_parity_end_io; finish_parity_scrub()
2453 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); finish_parity_scrub()
2454 submit_bio(WRITE, bio); finish_parity_scrub()
2538 static void raid56_parity_scrub_end_io(struct bio *bio, int err) raid56_parity_scrub_end_io() argument
2540 struct btrfs_raid_bio *rbio = bio->bi_private; raid56_parity_scrub_end_io()
2543 fail_bio_stripe(rbio, bio); raid56_parity_scrub_end_io()
2545 set_bio_pages_uptodate(bio); raid56_parity_scrub_end_io()
2547 bio_put(bio); raid56_parity_scrub_end_io()
2567 struct bio *bio; raid56_parity_scrub_stripe() local
2586 * page_in_rbio finds a page in the bio list raid56_parity_scrub_stripe()
2595 * the bio cache may have handed us an uptodate raid56_parity_scrub_stripe()
2620 * the bbio may be freed once we submit the last bio. Make sure raid56_parity_scrub_stripe()
2625 bio = bio_list_pop(&bio_list); raid56_parity_scrub_stripe()
2626 if (!bio) raid56_parity_scrub_stripe()
2629 bio->bi_private = rbio; raid56_parity_scrub_stripe()
2630 bio->bi_end_io = raid56_parity_scrub_end_io; raid56_parity_scrub_stripe()
2632 btrfs_bio_wq_end_io(rbio->fs_info, bio, raid56_parity_scrub_stripe()
2635 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); raid56_parity_scrub_stripe()
2636 submit_bio(READ, bio); raid56_parity_scrub_stripe()
1329 find_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) find_bio_stripe() argument
1356 find_logical_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) find_logical_bio_stripe() argument
1410 fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) fail_bio_stripe() argument
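The raid56.c hits above all follow one completion pattern: every bio popped from the list gets the rbio as bi_private and a shared end_io callback that records a stripe failure or marks pages uptodate, drops its bio reference, and lets the last completion drive the next phase (finish_rmw, recovery, or scrub). Below is a minimal user-space sketch of that last-one-out counting idea, using invented mock_rbio/mock_bio types; it is not the kernel bio API and completions run synchronously for simplicity.

    /* Hypothetical model of the finish_rmw()/raid_write_end_io() shape:
     * every queued "bio" shares one completion callback, and the last
     * completion triggers the final step. No kernel APIs are used. */
    #include <stdio.h>
    #include <stdlib.h>

    struct mock_rbio {
        int pending;                       /* bios still in flight */
        void (*finish)(struct mock_rbio *);
    };

    struct mock_bio {
        struct mock_rbio *private;         /* stands in for bi_private */
        void (*end_io)(struct mock_bio *, int err);
    };

    static void finish_write(struct mock_rbio *rbio)
    {
        (void)rbio;
        printf("all writes done, completing rbio\n");
    }

    static void write_end_io(struct mock_bio *bio, int err)
    {
        struct mock_rbio *rbio = bio->private;

        if (err)
            printf("one stripe failed: %d\n", err);
        free(bio);                         /* bio_put() analogue */
        if (--rbio->pending == 0)          /* last completion finishes */
            rbio->finish(rbio);
    }

    int main(void)
    {
        struct mock_rbio rbio = { .pending = 3, .finish = finish_write };

        for (int i = 0; i < 3; i++) {
            struct mock_bio *bio = malloc(sizeof(*bio));
            bio->private = &rbio;
            bio->end_io = write_end_io;
            /* "submit": the fake device completes immediately */
            bio->end_io(bio, 0);
        }
        return 0;
    }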
H A Dcompression.c20 #include <linux/bio.h>
64 /* the compression algorithm for this bio */
74 /* for reads, this is the bio we are copying the data into */
75 struct bio *orig_bio;
97 static struct bio *compressed_bio_alloc(struct block_device *bdev, compressed_bio_alloc()
146 * decompress them and then run the bio end_io routines on the
155 static void end_compressed_bio_read(struct bio *bio, int err) end_compressed_bio_read() argument
157 struct compressed_bio *cb = bio->bi_private; end_compressed_bio_read()
174 (u64)bio->bi_iter.bi_sector << 9); end_compressed_bio_read()
178 /* ok, we're the last bio for this extent, let's start end_compressed_bio_read()
199 /* do io completion on the original bio */ end_compressed_bio_read()
220 bio_put(bio); end_compressed_bio_read()
269 static void end_compressed_bio_write(struct bio *bio, int err) end_compressed_bio_write() argument
272 struct compressed_bio *cb = bio->bi_private; end_compressed_bio_write()
286 /* ok, we're the last bio for this extent, step one is to end_compressed_bio_write()
317 bio_put(bio); end_compressed_bio_write()
335 struct bio *bio = NULL; btrfs_submit_compressed_write() local
364 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); btrfs_submit_compressed_write()
365 if (!bio) { btrfs_submit_compressed_write()
369 bio->bi_private = cb; btrfs_submit_compressed_write()
370 bio->bi_end_io = end_compressed_bio_write; btrfs_submit_compressed_write()
378 if (bio->bi_iter.bi_size) btrfs_submit_compressed_write()
381 bio, 0); btrfs_submit_compressed_write()
386 if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < btrfs_submit_compressed_write()
388 bio_get(bio); btrfs_submit_compressed_write()
391 * inc the count before we submit the bio so btrfs_submit_compressed_write()
397 ret = btrfs_bio_wq_end_io(root->fs_info, bio, btrfs_submit_compressed_write()
402 ret = btrfs_csum_one_bio(root, inode, bio, btrfs_submit_compressed_write()
407 ret = btrfs_map_bio(root, WRITE, bio, 0, 1); btrfs_submit_compressed_write()
410 bio_put(bio); btrfs_submit_compressed_write()
412 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); btrfs_submit_compressed_write()
413 BUG_ON(!bio); btrfs_submit_compressed_write()
414 bio->bi_private = cb; btrfs_submit_compressed_write()
415 bio->bi_end_io = end_compressed_bio_write; btrfs_submit_compressed_write()
416 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); btrfs_submit_compressed_write()
427 bio_get(bio); btrfs_submit_compressed_write()
429 ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA); btrfs_submit_compressed_write()
433 ret = btrfs_csum_one_bio(root, inode, bio, start, 1); btrfs_submit_compressed_write()
437 ret = btrfs_map_bio(root, WRITE, bio, 0, 1); btrfs_submit_compressed_write()
440 bio_put(bio); btrfs_submit_compressed_write()
556 * for a compressed read, the bio we get passed has all the inode pages
560 * bio->bi_iter.bi_sector points to the compressed extent on disk
561 * bio->bi_io_vec points to all of the inode pages
562 * bio->bi_vcnt is a count of pages
565 * bio we were passed and then call the bio end_io calls
567 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, btrfs_submit_compressed_read() argument
574 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; btrfs_submit_compressed_read()
580 struct bio *comp_bio; btrfs_submit_compressed_read()
581 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; btrfs_submit_compressed_read()
595 page_offset(bio->bi_io_vec->bv_page), btrfs_submit_compressed_read()
622 cb->orig_bio = bio; btrfs_submit_compressed_read()
651 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; btrfs_submit_compressed_read()
683 * inc the count before we submit the bio so btrfs_submit_compressed_read()
919 * The basic idea is that we have a bio that was created by readpages.
920 * The pages in the bio are for the uncompressed data, and they may not
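The btrfs_submit_compressed_write() hits above keep adding compressed pages to one bio and, when bio_add_page() refuses the next page, submit the full bio and allocate a fresh one for the remaining pages. A minimal sketch of that fill-then-flush loop follows, assuming an invented chunk struct and submit_chunk() helper rather than real bios.

    /* Hypothetical sketch of the "fill a bio, submit when full, start a
     * new one" loop; the chunk type is a stand-in, not a kernel API. */
    #include <stdio.h>

    #define PAGES_PER_CHUNK 4

    struct chunk {
        int pages[PAGES_PER_CHUNK];
        int nr;
    };

    static void submit_chunk(struct chunk *c)
    {
        printf("submitting chunk with %d pages\n", c->nr);
        c->nr = 0;                        /* ready for reuse */
    }

    /* Returns 0 when the chunk is full, mimicking a short bio_add_page(). */
    static int chunk_add_page(struct chunk *c, int page)
    {
        if (c->nr == PAGES_PER_CHUNK)
            return 0;
        c->pages[c->nr++] = page;
        return 1;
    }

    int main(void)
    {
        struct chunk c = { .nr = 0 };

        for (int page = 0; page < 10; page++) {
            if (!chunk_add_page(&c, page)) {
                submit_chunk(&c);         /* flush the full chunk ... */
                chunk_add_page(&c, page); /* ... then retry the page */
            }
        }
        if (c.nr)
            submit_chunk(&c);             /* final partial chunk */
        return 0;
    }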
H A Dextent_io.c3 #include <linux/bio.h>
120 struct bio *bio; member in struct:extent_page_data
158 offsetof(struct btrfs_io_bio, bio)); extent_io_init()
2030 struct bio *bio; repair_io_failure() local
2045 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); repair_io_failure()
2046 if (!bio) repair_io_failure()
2048 bio->bi_iter.bi_size = 0; repair_io_failure()
2054 bio_put(bio); repair_io_failure()
2059 bio->bi_iter.bi_sector = sector; repair_io_failure()
2063 bio_put(bio); repair_io_failure()
2066 bio->bi_bdev = dev->bdev; repair_io_failure()
2067 bio_add_page(bio, page, length, pg_offset); repair_io_failure()
2069 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { repair_io_failure()
2071 bio_put(bio); repair_io_failure()
2080 bio_put(bio); repair_io_failure()
2291 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio, btrfs_check_repairable()
2318 * we need separate read requests for the failed bio btrfs_check_repairable()
2353 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, btrfs_create_repair_bio()
2358 struct bio *bio; btrfs_create_repair_bio() local
2362 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); btrfs_create_repair_bio()
2363 if (!bio) btrfs_create_repair_bio()
2366 bio->bi_end_io = endio_func; btrfs_create_repair_bio()
2367 bio->bi_iter.bi_sector = failrec->logical >> 9; btrfs_create_repair_bio()
2368 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; btrfs_create_repair_bio()
2369 bio->bi_iter.bi_size = 0; btrfs_create_repair_bio()
2370 bio->bi_private = data; btrfs_create_repair_bio()
2377 btrfs_bio = btrfs_io_bio(bio); btrfs_create_repair_bio()
2384 bio_add_page(bio, page, failrec->len, pg_offset); btrfs_create_repair_bio()
2386 return bio; btrfs_create_repair_bio()
2397 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, bio_readpage_error()
2404 struct bio *bio; bio_readpage_error() local
2426 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, bio_readpage_error()
2430 if (!bio) { bio_readpage_error()
2438 ret = tree->ops->submit_bio_hook(inode, read_mode, bio, bio_readpage_error()
2443 bio_put(bio); bio_readpage_error()
2484 static void end_bio_extent_writepage(struct bio *bio, int err) end_bio_extent_writepage() argument
2491 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
2520 bio_put(bio);
2546 static void end_bio_extent_readpage(struct bio *bio, int err) end_bio_extent_readpage() argument
2549 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); end_bio_extent_readpage()
2550 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); end_bio_extent_readpage()
2565 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
2570 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err, bio_for_each_segment_all()
2613 test_bit(BIO_UPTODATE, &bio->bi_flags)) bio_for_each_segment_all()
2622 * we just go on with the next page in our bio. If it bio_for_each_segment_all()
2626 ret = bio_readpage_error(bio, offset, page, start, end, bio_for_each_segment_all()
2630 test_bit(BIO_UPTODATE, &bio->bi_flags); bio_for_each_segment_all()
2683 bio_put(bio);
2687 * this allocates from the btrfs_bioset. We're returning a bio right now
2690 struct bio * btrfs_bio_alloc()
2695 struct bio *bio; btrfs_bio_alloc() local
2697 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset); btrfs_bio_alloc()
2699 if (bio == NULL && (current->flags & PF_MEMALLOC)) { btrfs_bio_alloc()
2700 while (!bio && (nr_vecs /= 2)) { btrfs_bio_alloc()
2701 bio = bio_alloc_bioset(gfp_flags, btrfs_bio_alloc()
2706 if (bio) { btrfs_bio_alloc()
2707 bio->bi_bdev = bdev; btrfs_bio_alloc()
2708 bio->bi_iter.bi_sector = first_sector; btrfs_bio_alloc()
2709 btrfs_bio = btrfs_io_bio(bio); btrfs_bio_alloc()
2714 return bio; btrfs_bio_alloc()
2717 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) btrfs_bio_clone() argument
2720 struct bio *new; btrfs_bio_clone()
2722 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset); btrfs_bio_clone()
2733 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) btrfs_io_bio_alloc()
2736 struct bio *bio; btrfs_io_bio_alloc() local
2738 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); btrfs_io_bio_alloc()
2739 if (bio) { btrfs_io_bio_alloc()
2740 btrfs_bio = btrfs_io_bio(bio); btrfs_io_bio_alloc()
2745 return bio; btrfs_io_bio_alloc()
2749 static int __must_check submit_one_bio(int rw, struct bio *bio, submit_one_bio() argument
2753 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; submit_one_bio()
2755 struct extent_io_tree *tree = bio->bi_private; submit_one_bio()
2760 bio->bi_private = NULL; submit_one_bio()
2762 bio_get(bio); submit_one_bio()
2765 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, submit_one_bio()
2768 btrfsic_submit_bio(rw, bio); submit_one_bio()
2770 if (bio_flagged(bio, BIO_EOPNOTSUPP)) submit_one_bio()
2772 bio_put(bio); submit_one_bio()
2777 unsigned long offset, size_t size, struct bio *bio, merge_bio()
2782 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, merge_bio()
2793 struct bio **bio_ret, submit_extent_page()
2802 struct bio *bio; submit_extent_page() local
2810 bio = *bio_ret; submit_extent_page()
2812 contig = bio->bi_iter.bi_sector == sector; submit_extent_page()
2814 contig = bio_end_sector(bio) == sector; submit_extent_page()
2818 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || submit_extent_page()
2819 bio_add_page(bio, page, page_size, offset) < page_size) { submit_extent_page()
2820 ret = submit_one_bio(rw, bio, mirror_num, submit_extent_page()
2826 bio = NULL; submit_extent_page()
2836 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); submit_extent_page()
2837 if (!bio) submit_extent_page()
2840 bio_add_page(bio, page, page_size, offset); submit_extent_page()
2841 bio->bi_end_io = end_io_func; submit_extent_page()
2842 bio->bi_private = tree; submit_extent_page()
2845 *bio_ret = bio; submit_extent_page()
2847 ret = submit_one_bio(rw, bio, mirror_num, bio_flags); submit_extent_page()
2910 struct bio **bio, int mirror_num, __do_readpage()
3018 * single bio to populate the pages for the 2 ranges because __do_readpage()
3031 * If the bio to read the compressed extent covers both ranges, __do_readpage()
3038 * make the compressed bio endio callback populate the pages __do_readpage()
3039 * for both ranges because each compressed bio is tightly __do_readpage()
3100 bdev, bio, pnr, __do_readpage()
3130 struct bio **bio, int mirror_num, __do_contiguous_readpages()
3151 __do_readpage(tree, pages[index], get_extent, em_cached, bio, __do_contiguous_readpages()
3161 struct bio **bio, int mirror_num, __extent_readpages()
3183 bio, mirror_num, bio_flags, __extent_readpages()
3194 end, get_extent, em_cached, bio, __extent_readpages()
3202 struct bio **bio, int mirror_num, __extent_read_full_page()
3221 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, __extent_read_full_page()
3229 struct bio *bio = NULL; extent_read_full_page() local
3233 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, extent_read_full_page()
3235 if (bio) extent_read_full_page()
3236 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); extent_read_full_page()
3243 struct bio *bio = NULL; extent_read_full_page_nolock() local
3247 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, extent_read_full_page_nolock()
3249 if (bio) extent_read_full_page_nolock()
3250 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); extent_read_full_page_nolock()
3499 bdev, &epd->bio, max_nr, __extent_writepage_io()
3747 static void end_bio_extent_buffer_writepage(struct bio *bio, int err) end_bio_extent_buffer_writepage() argument
3753 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
3773 bio_put(bio);
3801 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, write_one_eb()
3836 .bio = NULL, btree_write_cache_pages()
4090 if (epd->bio) { flush_epd_write_bio()
4097 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags); flush_epd_write_bio()
4099 epd->bio = NULL; flush_epd_write_bio()
4115 .bio = NULL, extent_write_full_page()
4140 .bio = NULL, extent_write_locked_range()
4180 .bio = NULL, extent_writepages()
4200 struct bio *bio = NULL; extent_readpages() local
4224 &bio, 0, &bio_flags, READ, &prev_em_start); extent_readpages()
4229 &bio, 0, &bio_flags, READ, &prev_em_start); extent_readpages()
4235 if (bio) extent_readpages()
4236 return submit_one_bio(READ, bio, 0, bio_flags); extent_readpages()
5207 struct bio *bio = NULL; read_extent_buffer_pages() local
5250 get_extent, &bio, read_extent_buffer_pages()
5260 if (bio) { read_extent_buffer_pages()
5261 err = submit_one_bio(READ | REQ_META, bio, mirror_num, read_extent_buffer_pages()
2776 merge_bio(int rw, struct extent_io_tree *tree, struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags) merge_bio() argument
2906 __do_readpage(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw, u64 *prev_em_start) __do_readpage() argument
3125 __do_contiguous_readpages(struct extent_io_tree *tree, struct page *pages[], int nr_pages, u64 start, u64 end, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw, u64 *prev_em_start) __do_contiguous_readpages() argument
3157 __extent_readpages(struct extent_io_tree *tree, struct page *pages[], int nr_pages, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw, u64 *prev_em_start) __extent_readpages() argument
3199 __extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, struct bio **bio, int mirror_num, unsigned long *bio_flags, int rw) __extent_read_full_page() argument
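The submit_extent_page() hits show the coalescing rule used throughout extent_io.c: keep one open bio per caller, append when the new page is contiguous with its end sector and the merge hook and bio_add_page() accept it, otherwise submit the open bio and start another. Below is a small user-space model of that rule, with illustrative open_req/submit_req/queue_range names that are not part of the kernel.

    /* Hypothetical coalescing writer: merge contiguous ranges into one
     * open request, flush on a discontiguity. */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct open_req {
        uint64_t start, end;    /* sector range currently queued */
        bool in_use;
    };

    static void submit_req(struct open_req *r)
    {
        printf("submit sectors %llu..%llu\n",
               (unsigned long long)r->start, (unsigned long long)r->end);
        r->in_use = false;
    }

    static void queue_range(struct open_req *r, uint64_t sector, uint64_t len)
    {
        if (r->in_use && r->end == sector) {
            r->end += len;               /* contiguous: merge */
            return;
        }
        if (r->in_use)
            submit_req(r);               /* discontiguous: flush first */
        r->start = sector;
        r->end = sector + len;
        r->in_use = true;
    }

    int main(void)
    {
        struct open_req r = { .in_use = false };

        queue_range(&r, 0, 8);
        queue_range(&r, 8, 8);           /* merges with the open request */
        queue_range(&r, 100, 8);         /* forces a submit */
        if (r.in_use)
            submit_req(&r);
        return 0;
    }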
H A Dextent_io.h25 * flags for bio submission. The high bits indicate the compression
26 * type for this bio
65 struct bio *bio, int mirror_num,
75 size_t size, struct bio *bio,
328 struct bio *
331 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
332 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
351 * bio end_io callback is called to indicate things have failed.
367 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
369 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
H A Dvolumes.h22 #include <linux/bio.h>
33 struct bio *head;
34 struct bio *tail;
128 struct bio *flush_bio;
269 typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
278 struct bio bio; member in struct:btrfs_io_bio
281 static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio) btrfs_io_bio() argument
283 return container_of(bio, struct btrfs_io_bio, bio); btrfs_io_bio()
293 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
303 struct bio *orig_bio;
425 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
H A Dscrub.c53 * of (dynamically allocated) pages that are added to a bio.
55 #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
97 struct bio *bio; member in struct:scrub_bio
281 static void scrub_bio_end_io(struct bio *bio, int err);
298 static void scrub_wr_bio_end_io(struct bio *bio, int err);
430 bio_put(sbio->bio); scrub_free_ctx()
465 * to a bio fails. scrub_setup_ctx()
882 * pages failed or the bio failed to read, e.g. with EIO. In the latter
883 * case, this function handles all pages in the bio, even though only one
1000 * the area was part of a huge bio and other parts of the scrub_handle_errored_block()
1001 * bio caused I/O errors, or the block layer merged several scrub_handle_errored_block()
1003 * different bio (usually one of the two latter cases is scrub_handle_errored_block()
1432 static void scrub_bio_wait_endio(struct bio *bio, int error) scrub_bio_wait_endio() argument
1434 struct scrub_bio_ret *ret = bio->bi_private; scrub_bio_wait_endio()
1447 struct bio *bio, scrub_submit_raid56_bio_wait()
1455 bio->bi_iter.bi_sector = page->logical >> 9; scrub_submit_raid56_bio_wait()
1456 bio->bi_private = &done; scrub_submit_raid56_bio_wait()
1457 bio->bi_end_io = scrub_bio_wait_endio; scrub_submit_raid56_bio_wait()
1459 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, scrub_submit_raid56_bio_wait()
1491 struct bio *bio; scrub_recheck_block() local
1501 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); scrub_recheck_block()
1502 if (!bio) { scrub_recheck_block()
1507 bio->bi_bdev = page->dev->bdev; scrub_recheck_block()
1509 bio_add_page(bio, page->page, PAGE_SIZE, 0); scrub_recheck_block()
1511 if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) scrub_recheck_block()
1514 bio->bi_iter.bi_sector = page->physical >> 9; scrub_recheck_block()
1516 if (btrfsic_submit_bio_wait(READ, bio)) scrub_recheck_block()
1520 bio_put(bio); scrub_recheck_block()
1628 struct bio *bio; scrub_repair_page_from_good_copy() local
1638 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); scrub_repair_page_from_good_copy()
1639 if (!bio) scrub_repair_page_from_good_copy()
1641 bio->bi_bdev = page_bad->dev->bdev; scrub_repair_page_from_good_copy()
1642 bio->bi_iter.bi_sector = page_bad->physical >> 9; scrub_repair_page_from_good_copy()
1644 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); scrub_repair_page_from_good_copy()
1646 bio_put(bio); scrub_repair_page_from_good_copy()
1650 if (btrfsic_submit_bio_wait(WRITE, bio)) { scrub_repair_page_from_good_copy()
1656 bio_put(bio); scrub_repair_page_from_good_copy()
1659 bio_put(bio); scrub_repair_page_from_good_copy()
1724 struct bio *bio; scrub_add_page_to_wr_bio() local
1729 bio = sbio->bio; scrub_add_page_to_wr_bio()
1730 if (!bio) { scrub_add_page_to_wr_bio()
1731 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); scrub_add_page_to_wr_bio()
1732 if (!bio) { scrub_add_page_to_wr_bio()
1736 sbio->bio = bio; scrub_add_page_to_wr_bio()
1739 bio->bi_private = sbio; scrub_add_page_to_wr_bio()
1740 bio->bi_end_io = scrub_wr_bio_end_io; scrub_add_page_to_wr_bio()
1741 bio->bi_bdev = sbio->dev->bdev; scrub_add_page_to_wr_bio()
1742 bio->bi_iter.bi_sector = sbio->physical >> 9; scrub_add_page_to_wr_bio()
1752 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); scrub_add_page_to_wr_bio()
1755 bio_put(sbio->bio); scrub_add_page_to_wr_bio()
1756 sbio->bio = NULL; scrub_add_page_to_wr_bio()
1784 WARN_ON(!sbio->bio->bi_bdev); scrub_wr_submit()
1790 btrfsic_submit_bio(WRITE, sbio->bio); scrub_wr_submit()
1793 static void scrub_wr_bio_end_io(struct bio *bio, int err) scrub_wr_bio_end_io() argument
1795 struct scrub_bio *sbio = bio->bi_private; scrub_wr_bio_end_io()
1799 sbio->bio = bio; scrub_wr_bio_end_io()
1829 bio_put(sbio->bio); scrub_wr_bio_end_io_worker()
2091 if (!sbio->bio->bi_bdev) { scrub_submit()
2100 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n"); scrub_submit()
2101 bio_endio(sbio->bio, -EIO); scrub_submit()
2103 btrfsic_submit_bio(READ, sbio->bio); scrub_submit()
2116 * grab a fresh bio or wait for one to become available scrub_add_page_to_rd_bio()
2133 struct bio *bio; scrub_add_page_to_rd_bio() local
2138 bio = sbio->bio; scrub_add_page_to_rd_bio()
2139 if (!bio) { scrub_add_page_to_rd_bio()
2140 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); scrub_add_page_to_rd_bio()
2141 if (!bio) scrub_add_page_to_rd_bio()
2143 sbio->bio = bio; scrub_add_page_to_rd_bio()
2146 bio->bi_private = sbio; scrub_add_page_to_rd_bio()
2147 bio->bi_end_io = scrub_bio_end_io; scrub_add_page_to_rd_bio()
2148 bio->bi_bdev = sbio->dev->bdev; scrub_add_page_to_rd_bio()
2149 bio->bi_iter.bi_sector = sbio->physical >> 9; scrub_add_page_to_rd_bio()
2161 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); scrub_add_page_to_rd_bio()
2164 bio_put(sbio->bio); scrub_add_page_to_rd_bio()
2165 sbio->bio = NULL; scrub_add_page_to_rd_bio()
2172 scrub_block_get(sblock); /* one for the page added to the bio */ scrub_add_page_to_rd_bio()
2198 * a bio later on */ scrub_pages()
2258 /* last one frees, either here or in bio completion for last page */ scrub_pages()
2263 static void scrub_bio_end_io(struct bio *bio, int err) scrub_bio_end_io() argument
2265 struct scrub_bio *sbio = bio->bi_private; scrub_bio_end_io()
2269 sbio->bio = bio; scrub_bio_end_io()
2300 bio_put(sbio->bio); scrub_bio_end_io_worker()
2301 sbio->bio = NULL; scrub_bio_end_io_worker()
2417 /* scrub extent tries to collect up to 64 kB for each bio */ scrub_extent()
2491 * a bio later on */ scrub_pages_for_parity()
2552 /* last one frees, either here or in bio completion for last page */ scrub_pages_for_parity()
2665 static void scrub_parity_bio_endio(struct bio *bio, int error) scrub_parity_bio_endio() argument
2667 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; scrub_parity_bio_endio()
2676 bio_put(bio); scrub_parity_bio_endio()
2682 struct bio *bio; scrub_parity_check_and_repair() local
2700 bio = btrfs_io_bio_alloc(GFP_NOFS, 0); scrub_parity_check_and_repair()
2701 if (!bio) scrub_parity_check_and_repair()
2704 bio->bi_iter.bi_sector = sparity->logic_start >> 9; scrub_parity_check_and_repair()
2705 bio->bi_private = sparity; scrub_parity_check_and_repair()
2706 bio->bi_end_io = scrub_parity_bio_endio; scrub_parity_check_and_repair()
2708 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, scrub_parity_check_and_repair()
2724 bio_put(bio); scrub_parity_check_and_repair()
4193 struct bio *bio; write_page_nocow() local
4205 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); write_page_nocow()
4206 if (!bio) { write_page_nocow()
4212 bio->bi_iter.bi_size = 0; write_page_nocow()
4213 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; write_page_nocow()
4214 bio->bi_bdev = dev->bdev; write_page_nocow()
4215 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); write_page_nocow()
4218 bio_put(bio); write_page_nocow()
4223 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) write_page_nocow()
4226 bio_put(bio); write_page_nocow()
1446 scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) scrub_submit_raid56_bio_wait() argument
H A Dcheck-integrity.c168 bio_end_io_t *bio; member in union:btrfsic_block::__anon10772
339 struct bio *bio, int *bio_is_patched,
346 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
416 b->orig_bio_bh_end_io.bio = NULL; btrfsic_block_init()
1674 struct bio *bio; btrfsic_read_block() local
1677 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); btrfsic_read_block()
1678 if (!bio) { btrfsic_read_block()
1684 bio->bi_bdev = block_ctx->dev->bdev; btrfsic_read_block()
1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; btrfsic_read_block()
1688 ret = bio_add_page(bio, block_ctx->pagev[j], btrfsic_read_block()
1698 if (submit_bio_wait(READ, bio)) { btrfsic_read_block()
1702 bio_put(bio); btrfsic_read_block()
1705 bio_put(bio); btrfsic_read_block()
1821 struct bio *bio, int *bio_is_patched, btrfsic_process_written_block()
1999 if (NULL != bio) { btrfsic_process_written_block()
2004 bio->bi_private; btrfsic_process_written_block()
2005 block->orig_bio_bh_end_io.bio = btrfsic_process_written_block()
2006 bio->bi_end_io; btrfsic_process_written_block()
2008 bio->bi_private = block; btrfsic_process_written_block()
2009 bio->bi_end_io = btrfsic_bio_end_io; btrfsic_process_written_block()
2014 bio->bi_private; btrfsic_process_written_block()
2019 block->orig_bio_bh_end_io.bio = btrfsic_process_written_block()
2021 bio; btrfsic_process_written_block()
2023 bio->bi_private = block; btrfsic_process_written_block()
2035 block->orig_bio_bh_end_io.bio = NULL; btrfsic_process_written_block()
2145 if (NULL != bio) { btrfsic_process_written_block()
2149 block->orig_bio_bh_private = bio->bi_private; btrfsic_process_written_block()
2150 block->orig_bio_bh_end_io.bio = bio->bi_end_io; btrfsic_process_written_block()
2152 bio->bi_private = block; btrfsic_process_written_block()
2153 bio->bi_end_io = btrfsic_bio_end_io; btrfsic_process_written_block()
2158 bio->bi_private; btrfsic_process_written_block()
2163 block->orig_bio_bh_end_io.bio = btrfsic_process_written_block()
2164 chained_block->orig_bio_bh_end_io.bio; btrfsic_process_written_block()
2166 bio->bi_private = block; btrfsic_process_written_block()
2178 block->orig_bio_bh_end_io.bio = NULL; btrfsic_process_written_block()
2210 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) btrfsic_bio_end_io()
2223 bp->bi_end_io = block->orig_bio_bh_end_io.bio; btrfsic_bio_end_io()
2963 static void __btrfsic_submit_bio(int rw, struct bio *bio) __btrfsic_submit_bio() argument
2973 dev_state = btrfsic_dev_state_lookup(bio->bi_bdev); __btrfsic_submit_bio()
2975 (rw & WRITE) && NULL != bio->bi_io_vec) { __btrfsic_submit_bio()
2982 dev_bytenr = 512 * bio->bi_iter.bi_sector; __btrfsic_submit_bio()
2989 rw, bio->bi_vcnt, __btrfsic_submit_bio()
2990 (unsigned long long)bio->bi_iter.bi_sector, __btrfsic_submit_bio()
2991 dev_bytenr, bio->bi_bdev); __btrfsic_submit_bio()
2993 mapped_datav = kmalloc_array(bio->bi_vcnt, __btrfsic_submit_bio()
2998 for (i = 0; i < bio->bi_vcnt; i++) { __btrfsic_submit_bio()
2999 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); __btrfsic_submit_bio()
3000 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); __btrfsic_submit_bio()
3004 kunmap(bio->bi_io_vec[i].bv_page); __btrfsic_submit_bio()
3013 i, cur_bytenr, bio->bi_io_vec[i].bv_len, __btrfsic_submit_bio()
3014 bio->bi_io_vec[i].bv_offset); __btrfsic_submit_bio()
3015 cur_bytenr += bio->bi_io_vec[i].bv_len; __btrfsic_submit_bio()
3018 mapped_datav, bio->bi_vcnt, __btrfsic_submit_bio()
3019 bio, &bio_is_patched, __btrfsic_submit_bio()
3023 kunmap(bio->bi_io_vec[i].bv_page); __btrfsic_submit_bio()
3031 rw, bio->bi_bdev); __btrfsic_submit_bio()
3050 block->orig_bio_bh_private = bio->bi_private; __btrfsic_submit_bio()
3051 block->orig_bio_bh_end_io.bio = bio->bi_end_io; __btrfsic_submit_bio()
3053 bio->bi_private = block; __btrfsic_submit_bio()
3054 bio->bi_end_io = btrfsic_bio_end_io; __btrfsic_submit_bio()
3061 void btrfsic_submit_bio(int rw, struct bio *bio) btrfsic_submit_bio() argument
3063 __btrfsic_submit_bio(rw, bio); btrfsic_submit_bio()
3064 submit_bio(rw, bio); btrfsic_submit_bio()
3067 int btrfsic_submit_bio_wait(int rw, struct bio *bio) btrfsic_submit_bio_wait() argument
3069 __btrfsic_submit_bio(rw, bio); btrfsic_submit_bio_wait()
3070 return submit_bio_wait(rw, bio); btrfsic_submit_bio_wait()
1818 btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, u64 dev_bytenr, char **mapped_datav, unsigned int num_pages, struct bio *bio, int *bio_is_patched, struct buffer_head *bh, int submit_bio_bh_rw) btrfsic_process_written_block() argument
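check-integrity.c works by hijacking completions: __btrfsic_submit_bio() saves the submitter's bi_private and bi_end_io, points them at its own block state and btrfsic_bio_end_io(), and that wrapper later restores and calls the originals. The sketch below models the save-wrap-chain idea with invented req/intercept types; it is a simplified illustration, not the btrfsic API.

    /* Hypothetical callback interception: save the caller's completion
     * handler, install a wrapper, chain to the original afterwards. */
    #include <stdio.h>

    struct req;
    typedef void (*end_io_fn)(struct req *, int err);

    struct req {
        void *private;
        end_io_fn end_io;
    };

    struct intercept {
        void *orig_private;
        end_io_fn orig_end_io;
    };

    static void wrapper_end_io(struct req *r, int err)
    {
        struct intercept *ic = r->private;

        printf("verify/checkpoint before completing (err=%d)\n", err);
        r->private = ic->orig_private;   /* restore what the caller set */
        r->end_io = ic->orig_end_io;
        r->end_io(r, err);               /* chain to the original */
    }

    static void orig_end_io(struct req *r, int err)
    {
        (void)r;
        printf("original completion runs last, err=%d\n", err);
    }

    int main(void)
    {
        struct req r = { .private = NULL, .end_io = orig_end_io };
        struct intercept ic = { .orig_private = r.private,
                                .orig_end_io = r.end_io };

        r.private = &ic;                 /* hijack, as the checker does */
        r.end_io = wrapper_end_io;
        r.end_io(&r, 0);                 /* the "device" completes it */
        return 0;
    }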
H A Dfile-item.c19 #include <linux/bio.h>
156 static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err) btrfs_io_bio_endio_readpage() argument
158 kfree(bio->csum_allocated); btrfs_io_bio_endio_readpage()
162 struct inode *inode, struct bio *bio, __btrfs_lookup_bio_sums()
165 struct bio_vec *bvec = bio->bi_io_vec; __btrfs_lookup_bio_sums()
166 struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); __btrfs_lookup_bio_sums()
185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; __btrfs_lookup_bio_sums()
204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) __btrfs_lookup_bio_sums()
207 WARN_ON(bio->bi_vcnt <= 0); __btrfs_lookup_bio_sums()
220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; __btrfs_lookup_bio_sums()
223 while (bio_index < bio->bi_vcnt) { __btrfs_lookup_bio_sums()
296 struct bio *bio, u32 *dst) btrfs_lookup_bio_sums()
298 return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0); btrfs_lookup_bio_sums()
302 struct bio *bio, u64 offset) btrfs_lookup_bio_sums_dio()
304 return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1); btrfs_lookup_bio_sums_dio()
427 struct bio *bio, u64 file_start, int contig) btrfs_csum_one_bio()
432 struct bio_vec *bvec = bio->bi_io_vec; btrfs_csum_one_bio()
439 WARN_ON(bio->bi_vcnt <= 0); btrfs_csum_one_bio()
440 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size), btrfs_csum_one_bio()
445 sums->len = bio->bi_iter.bi_size; btrfs_csum_one_bio()
455 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; btrfs_csum_one_bio()
458 while (bio_index < bio->bi_vcnt) { btrfs_csum_one_bio()
470 bytes_left = bio->bi_iter.bi_size - total_bytes; btrfs_csum_one_bio()
478 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + btrfs_csum_one_bio()
161 __btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 logical_offset, u32 *dst, int dio) __btrfs_lookup_bio_sums() argument
295 btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst) btrfs_lookup_bio_sums() argument
301 btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 offset) btrfs_lookup_bio_sums_dio() argument
426 btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig) btrfs_csum_one_bio() argument
H A Ddisk-io.h121 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
124 int rw, struct bio *bio, int mirror_num,
H A Dcompression.h46 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
H A Ddisk-io.c80 struct bio *bio; member in struct:btrfs_end_io_wq
117 struct bio *bio; member in struct:async_submit_bio
125 * bio_offset is optional, can be used if the pages in the bio
126 * can't tell us where in the file the bio should go
706 static void end_workqueue_bio(struct bio *bio, int err) end_workqueue_bio() argument
708 struct btrfs_end_io_wq *end_io_wq = bio->bi_private; end_workqueue_bio()
716 if (bio->bi_rw & REQ_WRITE) { end_workqueue_bio()
751 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, btrfs_bio_wq_end_io() argument
760 end_io_wq->private = bio->bi_private; btrfs_bio_wq_end_io()
761 end_io_wq->end_io = bio->bi_end_io; btrfs_bio_wq_end_io()
764 end_io_wq->bio = bio; btrfs_bio_wq_end_io()
767 bio->bi_private = end_io_wq; btrfs_bio_wq_end_io()
768 bio->bi_end_io = end_workqueue_bio; btrfs_bio_wq_end_io()
786 ret = async->submit_bio_start(async->inode, async->rw, async->bio, run_one_async_start()
809 /* If an error occurred we just want to clean up the bio and move on */ run_one_async_done()
811 bio_endio(async->bio, async->error); run_one_async_done()
815 async->submit_bio_done(async->inode, async->rw, async->bio, run_one_async_done()
829 int rw, struct bio *bio, int mirror_num, btrfs_wq_submit_bio()
843 async->bio = bio; btrfs_wq_submit_bio()
872 static int btree_csum_one_bio(struct bio *bio) btree_csum_one_bio() argument
878 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
889 struct bio *bio, int mirror_num, __btree_submit_bio_start()
897 return btree_csum_one_bio(bio); __btree_submit_bio_start()
900 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, __btree_submit_bio_done() argument
910 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); __btree_submit_bio_done()
912 bio_endio(bio, ret); __btree_submit_bio_done()
927 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btree_submit_bio_hook() argument
940 bio, BTRFS_WQ_ENDIO_METADATA); btree_submit_bio_hook()
943 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, btree_submit_bio_hook()
946 ret = btree_csum_one_bio(bio); btree_submit_bio_hook()
949 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, btree_submit_bio_hook()
957 inode, rw, bio, mirror_num, 0, btree_submit_bio_hook()
965 bio_endio(bio, ret); btree_submit_bio_hook()
1747 * called by the kthread helper functions to finally call the bio end_io
1752 struct bio *bio; end_workqueue_fn() local
1757 bio = end_io_wq->bio; end_workqueue_fn()
1760 bio->bi_private = end_io_wq->private; end_workqueue_fn()
1761 bio->bi_end_io = end_io_wq->end_io; end_workqueue_fn()
1763 bio_endio_nodec(bio, error); end_workqueue_fn()
3301 static void btrfs_end_empty_barrier(struct bio *bio, int err) btrfs_end_empty_barrier() argument
3305 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); btrfs_end_empty_barrier()
3306 clear_bit(BIO_UPTODATE, &bio->bi_flags); btrfs_end_empty_barrier()
3308 if (bio->bi_private) btrfs_end_empty_barrier()
3309 complete(bio->bi_private); btrfs_end_empty_barrier()
3310 bio_put(bio); btrfs_end_empty_barrier()
3322 struct bio *bio; write_dev_flush() local
3329 bio = device->flush_bio; write_dev_flush()
3330 if (!bio) write_dev_flush()
3335 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { write_dev_flush()
3339 } else if (!bio_flagged(bio, BIO_UPTODATE)) { write_dev_flush()
3346 bio_put(bio); write_dev_flush()
3357 bio = btrfs_io_bio_alloc(GFP_NOFS, 0); write_dev_flush()
3358 if (!bio) write_dev_flush()
3361 bio->bi_end_io = btrfs_end_empty_barrier; write_dev_flush()
3362 bio->bi_bdev = device->bdev; write_dev_flush()
3364 bio->bi_private = &device->flush_wait; write_dev_flush()
3365 device->flush_bio = bio; write_dev_flush()
3367 bio_get(bio); write_dev_flush()
3368 btrfsic_submit_bio(WRITE_FLUSH, bio); write_dev_flush()
4367 /* note we're sharing with inode.c for the merge bio hook */
828 btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done) btrfs_wq_submit_bio() argument
888 __btree_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) __btree_submit_bio_start() argument
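end_workqueue_bio() above does almost nothing in completion context: it stashes the original bi_private/bi_end_io in a btrfs_end_io_wq and queues work, and end_workqueue_fn() later restores them and finishes the bio from a worker. A single-threaded user-space approximation of that defer-to-worker shape follows; the done_item/worker names are invented for the sketch and there is no real concurrency here.

    /* Hypothetical deferral: the "interrupt-time" handler only queues,
     * the worker drains the queue and runs the saved callbacks. */
    #include <stdio.h>
    #include <stddef.h>

    struct done_item {
        struct done_item *next;
        int error;
        void (*end_io)(struct done_item *, int);
    };

    static struct done_item *queue_head;

    static void final_end_io(struct done_item *it, int err)
    {
        (void)it;
        printf("deferred completion, err=%d\n", err);
    }

    /* Called from "completion context": do no real work, only enqueue. */
    static void completion_handler(struct done_item *it, int err)
    {
        it->error = err;
        it->next = queue_head;
        queue_head = it;
    }

    /* Called from "worker context": drain and run the saved callbacks. */
    static void worker(void)
    {
        while (queue_head) {
            struct done_item *it = queue_head;
            queue_head = it->next;
            it->end_io(it, it->error);
        }
    }

    int main(void)
    {
        struct done_item a = { .end_io = final_end_io };
        struct done_item b = { .end_io = final_end_io };

        completion_handler(&a, 0);
        completion_handler(&b, -5);
        worker();
        return 0;
    }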
H A Dvolumes.c19 #include <linux/bio.h>
226 struct bio *head, struct bio *tail) requeue_list()
229 struct bio *old_head; requeue_list()
252 struct bio *pending; run_scheduled_bios()
256 struct bio *tail; run_scheduled_bios()
257 struct bio *cur; run_scheduled_bios()
5037 /* we limit the length of each bio to what fits in a stripe */ __btrfs_map_block()
5588 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) btrfs_end_bbio() argument
5591 bio_endio_nodec(bio, err); btrfs_end_bbio()
5593 bio_endio(bio, err); btrfs_end_bbio()
5597 static void btrfs_end_bio(struct bio *bio, int err) btrfs_end_bio() argument
5599 struct btrfs_bio *bbio = bio->bi_private; btrfs_end_bio()
5607 btrfs_io_bio(bio)->stripe_index; btrfs_end_bio()
5612 if (bio->bi_rw & WRITE) btrfs_end_bio()
5618 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH) btrfs_end_bio()
5626 if (bio == bbio->orig_bio) btrfs_end_bio()
5633 bio_put(bio); btrfs_end_bio()
5634 bio = bbio->orig_bio; btrfs_end_bio()
5637 bio->bi_private = bbio->private; btrfs_end_bio()
5638 bio->bi_end_io = bbio->end_io; btrfs_end_bio()
5639 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; btrfs_end_bio()
5641 * beyond the tolerance of the btrfs bio btrfs_end_bio()
5647 * this bio is actually up to date, we didn't btrfs_end_bio()
5650 set_bit(BIO_UPTODATE, &bio->bi_flags); btrfs_end_bio()
5654 btrfs_end_bbio(bbio, bio, err); btrfs_end_bio()
5656 bio_put(bio); btrfs_end_bio()
5664 * This will add one bio to the pending list for a device and make sure
5669 int rw, struct bio *bio) btrfs_schedule_bio()
5675 bio_endio(bio, -EIO); btrfs_schedule_bio()
5681 bio_get(bio); btrfs_schedule_bio()
5682 btrfsic_submit_bio(rw, bio); btrfs_schedule_bio()
5683 bio_put(bio); btrfs_schedule_bio()
5689 * higher layers. Otherwise, the async bio makes it appear we have btrfs_schedule_bio()
5694 WARN_ON(bio->bi_next); btrfs_schedule_bio()
5695 bio->bi_next = NULL; btrfs_schedule_bio()
5696 bio->bi_rw |= rw; btrfs_schedule_bio()
5699 if (bio->bi_rw & REQ_SYNC) btrfs_schedule_bio()
5705 pending_bios->tail->bi_next = bio; btrfs_schedule_bio()
5707 pending_bios->tail = bio; btrfs_schedule_bio()
5709 pending_bios->head = bio; btrfs_schedule_bio()
5720 static int bio_size_ok(struct block_device *bdev, struct bio *bio, bio_size_ok() argument
5729 .bi_rw = bio->bi_rw, bio_size_ok()
5732 if (WARN_ON(bio->bi_vcnt == 0)) bio_size_ok()
5735 prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; bio_size_ok()
5736 if (bio_sectors(bio) > max_sectors) bio_size_ok()
5742 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; bio_size_ok()
5749 struct bio *bio, u64 physical, int dev_nr, submit_stripe_bio()
5754 bio->bi_private = bbio; submit_stripe_bio()
5755 btrfs_io_bio(bio)->stripe_index = dev_nr; submit_stripe_bio()
5756 bio->bi_end_io = btrfs_end_bio; submit_stripe_bio()
5757 bio->bi_iter.bi_sector = physical >> 9; submit_stripe_bio()
5766 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, submit_stripe_bio()
5767 name->str, dev->devid, bio->bi_iter.bi_size); submit_stripe_bio()
5771 bio->bi_bdev = dev->bdev; submit_stripe_bio()
5776 btrfs_schedule_bio(root, dev, rw, bio); submit_stripe_bio()
5778 btrfsic_submit_bio(rw, bio); submit_stripe_bio()
5782 struct bio *first_bio, struct btrfs_device *dev, breakup_stripe_bio()
5786 struct bio *bio; breakup_stripe_bio() local
5791 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS); breakup_stripe_bio()
5792 if (!bio) breakup_stripe_bio()
5796 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, breakup_stripe_bio()
5798 u64 len = bio->bi_iter.bi_size; breakup_stripe_bio()
5801 submit_stripe_bio(root, bbio, bio, physical, dev_nr, breakup_stripe_bio()
5809 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async); breakup_stripe_bio()
5813 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) bbio_error() argument
5817 /* Should be the original bio. */ bbio_error()
5818 WARN_ON(bio != bbio->orig_bio); bbio_error()
5820 bio->bi_private = bbio->private; bbio_error()
5821 bio->bi_end_io = bbio->end_io; bbio_error()
5822 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; bbio_error()
5823 bio->bi_iter.bi_sector = logical >> 9; bbio_error()
5825 btrfs_end_bbio(bbio, bio, -EIO); bbio_error()
5829 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, btrfs_map_bio() argument
5833 struct bio *first_bio = bio; btrfs_map_bio()
5834 u64 logical = (u64)bio->bi_iter.bi_sector << 9; btrfs_map_bio()
5842 length = bio->bi_iter.bi_size; btrfs_map_bio()
5864 ret = raid56_parity_write(root, bio, bbio, map_length); btrfs_map_bio()
5866 ret = raid56_parity_recover(root, bio, bbio, map_length, btrfs_map_bio()
5875 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu", btrfs_map_bio()
5888 * Check and see if we're ok with this bio based on it's size btrfs_map_bio()
5900 bio = btrfs_bio_clone(first_bio, GFP_NOFS); btrfs_map_bio()
5901 BUG_ON(!bio); /* -ENOMEM */ btrfs_map_bio()
5903 bio = first_bio; btrfs_map_bio()
5907 submit_stripe_bio(root, bbio, bio, btrfs_map_bio()
5667 btrfs_schedule_bio(struct btrfs_root *root, struct btrfs_device *device, int rw, struct bio *bio) btrfs_schedule_bio() argument
5748 submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, struct bio *bio, u64 physical, int dev_nr, int rw, int async) submit_stripe_bio() argument
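btrfs_schedule_bio() above queues async writes on a per-device list built from bio->bi_next with separate head and tail pointers, so run_scheduled_bios() can drain them in order later. A compact sketch of that intrusive FIFO, using an invented item type in place of struct bio:

    /* Hypothetical intrusive FIFO: the item's own next pointer plays the
     * role of bi_next; head/tail give O(1) append and in-order drain. */
    #include <stdio.h>
    #include <stddef.h>

    struct item {
        struct item *next;   /* plays the role of bio->bi_next */
        int id;
    };

    struct pending_list {
        struct item *head;
        struct item *tail;
    };

    static void pending_add(struct pending_list *l, struct item *it)
    {
        it->next = NULL;
        if (l->tail)
            l->tail->next = it;
        else
            l->head = it;
        l->tail = it;
    }

    static void run_pending(struct pending_list *l)
    {
        while (l->head) {
            struct item *it = l->head;
            l->head = it->next;
            if (!l->head)
                l->tail = NULL;
            printf("submitting queued item %d\n", it->id);
        }
    }

    int main(void)
    {
        struct pending_list dev = { NULL, NULL };
        struct item a = { .id = 1 }, b = { .id = 2 };

        pending_add(&dev, &a);
        pending_add(&dev, &b);
        run_pending(&dev);
        return 0;
    }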
H A Dinode.c20 #include <linux/bio.h>
1794 size_t size, struct bio *bio, btrfs_merge_bio_hook()
1798 u64 logical = (u64)bio->bi_iter.bi_sector << 9; btrfs_merge_bio_hook()
1806 length = bio->bi_iter.bi_size; btrfs_merge_bio_hook()
1819 * we wait until bio submission time. All the pages in the bio are
1826 struct bio *bio, int mirror_num, __btrfs_submit_bio_start()
1833 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); __btrfs_submit_bio_start()
1840 * we wait until bio submission time. All the pages in the bio are
1846 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, __btrfs_submit_bio_done() argument
1853 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); __btrfs_submit_bio_done()
1855 bio_endio(bio, ret); __btrfs_submit_bio_done()
1863 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btrfs_submit_bio_hook() argument
1879 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); btrfs_submit_bio_hook()
1884 ret = btrfs_submit_compressed_read(inode, bio, btrfs_submit_bio_hook()
1889 ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); btrfs_submit_bio_hook()
1900 inode, rw, bio, mirror_num, btrfs_submit_bio_hook()
1906 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); btrfs_submit_bio_hook()
1912 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); btrfs_submit_bio_hook()
1916 bio_endio(bio, ret); btrfs_submit_bio_hook()
1922 * at IO completion time based on sums calculated at bio submission time.
3051 * if there's a match, we allow the bio to finish. If not, the code in
7625 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, submit_dio_repair_bio() argument
7633 bio_get(bio); submit_dio_repair_bio()
7635 ret = btrfs_bio_wq_end_io(root->fs_info, bio, submit_dio_repair_bio()
7640 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); submit_dio_repair_bio()
7642 bio_put(bio); submit_dio_repair_bio()
7647 struct bio *failed_bio, btrfs_check_dio_repairable()
7680 static int dio_read_error(struct inode *inode, struct bio *failed_bio, dio_read_error()
7686 struct bio *bio; dio_read_error() local
7711 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, dio_read_error()
7713 if (!bio) { dio_read_error()
7722 ret = submit_dio_repair_bio(inode, bio, read_mode, dio_read_error()
7726 bio_put(bio); dio_read_error()
7739 static void btrfs_retry_endio_nocsum(struct bio *bio, int err) btrfs_retry_endio_nocsum() argument
7741 struct btrfs_retry_complete *done = bio->bi_private; btrfs_retry_endio_nocsum()
7749 bio_for_each_segment_all(bvec, bio, i) btrfs_retry_endio_nocsum()
7753 bio_put(bio); btrfs_retry_endio_nocsum()
7768 bio_for_each_segment_all(bvec, &io_bio->bio, i) { __btrfs_correct_data_nocsum()
7774 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, __btrfs_correct_data_nocsum()
7794 static void btrfs_retry_endio(struct bio *bio, int err) btrfs_retry_endio() argument
7796 struct btrfs_retry_complete *done = bio->bi_private; btrfs_retry_endio()
7797 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); btrfs_retry_endio()
7807 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
7821 bio_put(bio);
7838 bio_for_each_segment_all(bvec, &io_bio->bio, i) { __btrfs_subio_endio_read()
7848 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, __btrfs_subio_endio_read()
7886 static void btrfs_endio_direct_read(struct bio *bio, int err) btrfs_endio_direct_read() argument
7888 struct btrfs_dio_private *dip = bio->bi_private; btrfs_endio_direct_read()
7890 struct bio *dio_bio; btrfs_endio_direct_read()
7891 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); btrfs_endio_direct_read()
7909 bio_put(bio); btrfs_endio_direct_read()
7912 static void btrfs_endio_direct_write(struct bio *bio, int err) btrfs_endio_direct_write() argument
7914 struct btrfs_dio_private *dip = bio->bi_private; btrfs_endio_direct_write()
7920 struct bio *dio_bio; btrfs_endio_direct_write()
7938 * our bio might span multiple ordered extents. If we haven't btrfs_endio_direct_write()
7956 bio_put(bio); btrfs_endio_direct_write()
7960 struct bio *bio, int mirror_num, __btrfs_submit_bio_start_direct_io()
7965 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); __btrfs_submit_bio_start_direct_io()
7970 static void btrfs_end_dio_bio(struct bio *bio, int err) btrfs_end_dio_bio() argument
7972 struct btrfs_dio_private *dip = bio->bi_private; btrfs_end_dio_bio()
7977 btrfs_ino(dip->inode), bio->bi_rw, btrfs_end_dio_bio()
7978 (unsigned long long)bio->bi_iter.bi_sector, btrfs_end_dio_bio()
7979 bio->bi_iter.bi_size, err); btrfs_end_dio_bio()
7982 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); btrfs_end_dio_bio()
8005 bio_put(bio); btrfs_end_dio_bio()
8008 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, btrfs_dio_bio_alloc()
8018 struct bio *bio, btrfs_lookup_and_bind_dio_csum()
8021 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); btrfs_lookup_and_bind_dio_csum()
8027 * the first bio to reduce the csum tree search and btrfs_lookup_and_bind_dio_csum()
8037 if (bio == dip->orig_bio) btrfs_lookup_and_bind_dio_csum()
8047 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, __btrfs_submit_dio_bio() argument
8051 struct btrfs_dio_private *dip = bio->bi_private; __btrfs_submit_dio_bio()
8059 bio_get(bio); __btrfs_submit_dio_bio()
8062 ret = btrfs_bio_wq_end_io(root->fs_info, bio, __btrfs_submit_dio_bio()
8073 inode, rw, bio, 0, 0, __btrfs_submit_dio_bio()
8081 * bio now. __btrfs_submit_dio_bio()
8083 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); __btrfs_submit_dio_bio()
8087 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio, __btrfs_submit_dio_bio()
8093 ret = btrfs_map_bio(root, rw, bio, 0, async_submit); __btrfs_submit_dio_bio()
8095 bio_put(bio); __btrfs_submit_dio_bio()
8104 struct bio *bio; btrfs_submit_direct_hook() local
8105 struct bio *orig_bio = dip->orig_bio; btrfs_submit_direct_hook()
8122 bio = orig_bio; btrfs_submit_direct_hook()
8133 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); btrfs_submit_direct_hook()
8134 if (!bio) btrfs_submit_direct_hook()
8137 bio->bi_private = dip; btrfs_submit_direct_hook()
8138 bio->bi_end_io = btrfs_end_dio_bio; btrfs_submit_direct_hook()
8139 btrfs_io_bio(bio)->logical = file_offset; btrfs_submit_direct_hook()
8144 bio_add_page(bio, bvec->bv_page, bvec->bv_len, btrfs_submit_direct_hook()
8147 * inc the count before we submit the bio so btrfs_submit_direct_hook()
8153 ret = __btrfs_submit_dio_bio(bio, inode, rw, btrfs_submit_direct_hook()
8157 bio_put(bio); btrfs_submit_direct_hook()
8168 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, btrfs_submit_direct_hook()
8170 if (!bio) btrfs_submit_direct_hook()
8172 bio->bi_private = dip; btrfs_submit_direct_hook()
8173 bio->bi_end_io = btrfs_end_dio_bio; btrfs_submit_direct_hook()
8174 btrfs_io_bio(bio)->logical = file_offset; btrfs_submit_direct_hook()
8181 bio_put(bio); btrfs_submit_direct_hook()
8192 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, btrfs_submit_direct_hook()
8197 bio_put(bio); btrfs_submit_direct_hook()
8212 static void btrfs_submit_direct(int rw, struct bio *dio_bio, btrfs_submit_direct()
8217 struct bio *io_bio; btrfs_submit_direct()
1793 btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags) btrfs_merge_bio_hook() argument
1825 __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) __btrfs_submit_bio_start() argument
7959 __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 offset) __btrfs_submit_bio_start_direct_io() argument
8015 btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root, struct inode *inode, struct btrfs_dio_private *dip, struct bio *bio, u64 file_offset) btrfs_lookup_and_bind_dio_csum() argument
H A Dbtrfs_inode.h300 struct bio *orig_bio;
303 struct bio *dio_bio;
306 * The original bio may be split into several sub-bios, this is
/linux-4.1.27/fs/nilfs2/
H A Dsegbuf.c35 struct bio *bio; member in struct:nilfs_write_info
341 static void nilfs_end_bio_write(struct bio *bio, int err) nilfs_end_bio_write() argument
343 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); nilfs_end_bio_write()
344 struct nilfs_segment_buffer *segbuf = bio->bi_private; nilfs_end_bio_write()
347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); nilfs_end_bio_write()
354 bio_put(bio); nilfs_end_bio_write()
361 struct bio *bio = wi->bio; nilfs_segbuf_submit_bio() local
369 bio_put(bio); nilfs_segbuf_submit_bio()
375 bio->bi_end_io = nilfs_end_bio_write; nilfs_segbuf_submit_bio()
376 bio->bi_private = segbuf; nilfs_segbuf_submit_bio()
377 bio_get(bio); nilfs_segbuf_submit_bio()
378 submit_bio(mode, bio); nilfs_segbuf_submit_bio()
380 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { nilfs_segbuf_submit_bio()
381 bio_put(bio); nilfs_segbuf_submit_bio()
385 bio_put(bio); nilfs_segbuf_submit_bio()
387 wi->bio = NULL; nilfs_segbuf_submit_bio()
394 wi->bio = NULL; nilfs_segbuf_submit_bio()
399 * nilfs_alloc_seg_bio - allocate a new bio for writing log
401 * @start: start block number of the bio
404 * Return Value: On success, pointer to the struct bio is returned.
407 static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, nilfs_alloc_seg_bio()
410 struct bio *bio; nilfs_alloc_seg_bio() local
412 bio = bio_alloc(GFP_NOIO, nr_vecs); nilfs_alloc_seg_bio()
413 if (bio == NULL) { nilfs_alloc_seg_bio()
414 while (!bio && (nr_vecs >>= 1)) nilfs_alloc_seg_bio()
415 bio = bio_alloc(GFP_NOIO, nr_vecs); nilfs_alloc_seg_bio()
417 if (likely(bio)) { nilfs_alloc_seg_bio()
418 bio->bi_bdev = nilfs->ns_bdev; nilfs_alloc_seg_bio()
419 bio->bi_iter.bi_sector = nilfs_alloc_seg_bio()
422 return bio; nilfs_alloc_seg_bio()
428 wi->bio = NULL; nilfs_segbuf_prepare_write()
444 if (!wi->bio) { nilfs_segbuf_submit_bh()
445 wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end, nilfs_segbuf_submit_bh()
447 if (unlikely(!wi->bio)) nilfs_segbuf_submit_bh()
451 len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh)); nilfs_segbuf_submit_bh()
456 /* bio is FULL */ nilfs_segbuf_submit_bh()
498 if (wi.bio) { nilfs_segbuf_write()
H A Dsegbuf.h28 #include <linux/bio.h>
71 * @sb_nbio: Number of flying bio requests
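nilfs_alloc_seg_bio() above degrades gracefully under memory pressure: if bio_alloc() fails at the requested vector count it keeps halving nr_vecs until an allocation succeeds or the count reaches zero. The same retry shape in plain C, with try_alloc() standing in for the real allocator and the failure threshold chosen only to exercise the fallback:

    /* Hypothetical halve-and-retry allocation fallback. */
    #include <stdio.h>
    #include <stdlib.h>

    /* Pretend allocations above 4 "vectors" fail, to force retries. */
    static void *try_alloc(int nr_vecs)
    {
        if (nr_vecs > 4)
            return NULL;
        return malloc((size_t)nr_vecs * 64);
    }

    static void *alloc_with_fallback(int nr_vecs)
    {
        void *p = try_alloc(nr_vecs);

        while (!p && (nr_vecs >>= 1))    /* halve and retry */
            p = try_alloc(nr_vecs);
        if (p)
            printf("allocated with %d vectors\n", nr_vecs);
        return p;
    }

    int main(void)
    {
        void *p = alloc_with_fallback(16);

        free(p);
        return 0;
    }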
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dlloop.c131 struct bio *lo_bio;
132 struct bio *lo_biotail;
145 /* data to handle bio for lustre. */
185 static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) do_bio_lustrebacked()
197 struct bio *bio; do_bio_lustrebacked() local
216 for (bio = head; bio != NULL; bio = bio->bi_next) { do_bio_lustrebacked()
217 LASSERT(rw == bio->bi_rw); do_bio_lustrebacked()
219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; bio_for_each_segment()
220 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
268 * Add bio to back of pending list
270 static void loop_add_bio(struct lloop_device *lo, struct bio *bio) loop_add_bio() argument
276 lo->lo_biotail->bi_next = bio; loop_add_bio()
277 lo->lo_biotail = bio; loop_add_bio()
279 lo->lo_bio = lo->lo_biotail = bio; loop_add_bio()
290 static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) loop_get_bio()
292 struct bio *first; loop_get_bio()
293 struct bio **bio; loop_get_bio() local
305 /* TODO: need to split the bio, too bad. */ loop_get_bio()
309 bio = &lo->lo_bio; loop_get_bio()
310 while (*bio && (*bio)->bi_rw == rw) { loop_get_bio()
311 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", loop_get_bio()
312 (unsigned long long)(*bio)->bi_iter.bi_sector, loop_get_bio()
313 (*bio)->bi_iter.bi_size, loop_get_bio()
314 page_count, (*bio)->bi_vcnt); loop_get_bio()
315 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) loop_get_bio()
319 page_count += (*bio)->bi_vcnt; loop_get_bio()
321 bio = &(*bio)->bi_next; loop_get_bio()
323 if (*bio) { loop_get_bio()
325 lo->lo_bio = *bio; loop_get_bio()
326 *bio = NULL; loop_get_bio()
337 static void loop_make_request(struct request_queue *q, struct bio *old_bio) loop_make_request()
346 CDEBUG(D_INFO, "submit bio sector %llu size %u\n", loop_make_request()
372 static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio) loop_handle_bio() argument
375 ret = do_bio_lustrebacked(lo, bio); loop_handle_bio()
376 while (bio) { loop_handle_bio()
377 struct bio *tmp = bio->bi_next; loop_handle_bio()
378 bio->bi_next = NULL; loop_handle_bio()
379 cfs_bio_endio(bio, bio->bi_iter.bi_size, ret); loop_handle_bio()
380 bio = tmp; loop_handle_bio()
397 struct bio *bio; loop_thread() local
437 bio = NULL; loop_thread()
438 count = loop_get_bio(lo, &bio); loop_thread()
440 CWARN("lloop(minor: %d): missing bio\n", lo->lo_number); loop_thread()
456 LASSERT(bio != NULL); loop_thread()
458 loop_handle_bio(lo, bio); loop_thread()
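loop_get_bio() in lloop.c batches queued bios from the head of the list only while they share the same direction and the accumulated segment count stays under LLOOP_MAX_SEGMENTS, leaving the rest pending for a later pass. A rough user-space sketch of that batching rule, with an invented lreq type and MAX_SEGMENTS limit:

    /* Hypothetical head-of-list batching by direction and segment budget. */
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_SEGMENTS 8

    struct lreq {
        struct lreq *next;
        int write;        /* direction */
        int segments;
    };

    /* Detach a batch from *head; returns the batch, updates *head. */
    static struct lreq *get_batch(struct lreq **head)
    {
        struct lreq *first = *head, **pos = head;
        int count = 0;

        while (*pos && (*pos)->write == first->write &&
               count + (*pos)->segments <= MAX_SEGMENTS) {
            count += (*pos)->segments;
            pos = &(*pos)->next;
        }
        *head = *pos;     /* rest of the list stays pending */
        *pos = NULL;      /* terminate the detached batch */
        return first;
    }

    int main(void)
    {
        struct lreq c = { NULL, 0, 3 };
        struct lreq b = { &c, 1, 4 };
        struct lreq a = { &b, 1, 3 };
        struct lreq *list = &a;

        struct lreq *batch = get_batch(&list);
        for (struct lreq *r = batch; r; r = r->next)
            printf("batched %s req with %d segments\n",
                   r->write ? "write" : "read", r->segments);
        return 0;
    }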
/linux-4.1.27/fs/gfs2/
H A Dlops.c17 #include <linux/bio.h>
196 * @bio: The bio
205 static void gfs2_end_log_write(struct bio *bio, int error) gfs2_end_log_write() argument
207 struct gfs2_sbd *sdp = bio->bi_private; gfs2_end_log_write()
217 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
225 bio_put(bio);
231 * gfs2_log_flush_bio - Submit any pending log bio
235 * Submit any pending part-built or full bio to the block device. If
236 * there is no pending bio, then this is a no-op.
249 * gfs2_log_alloc_bio - Allocate a new bio for log writing
253 * This should never be called when there is a cached bio in the
254 * super block. When it returns, there will be a cached bio in the
258 * Returns: Newly allocated bio
261 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) gfs2_log_alloc_bio()
265 struct bio *bio; gfs2_log_alloc_bio() local
270 bio = bio_alloc(GFP_NOIO, nrvecs); gfs2_log_alloc_bio()
271 if (likely(bio)) gfs2_log_alloc_bio()
276 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); gfs2_log_alloc_bio()
277 bio->bi_bdev = sb->s_bdev; gfs2_log_alloc_bio()
278 bio->bi_end_io = gfs2_end_log_write; gfs2_log_alloc_bio()
279 bio->bi_private = sdp; gfs2_log_alloc_bio()
281 sdp->sd_log_bio = bio; gfs2_log_alloc_bio()
283 return bio; gfs2_log_alloc_bio()
287 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
291 * If there is a cached bio, then if the next block number is sequential
292 * with the previous one, return it, otherwise flush the bio to the
293 * device. If there is not a cached bio, or we just flushed it, then
296 * Returns: The bio to use for log writes
299 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno) gfs2_log_get_bio()
301 struct bio *bio = sdp->sd_log_bio; gfs2_log_get_bio() local
304 if (bio) { gfs2_log_get_bio()
305 nblk = bio_end_sector(bio); gfs2_log_get_bio()
308 return bio; gfs2_log_get_bio()
323 * Try and add the page segment to the current bio. If that fails,
324 * submit the current bio to the device and create a new one, and
332 struct bio *bio; gfs2_log_write() local
335 bio = gfs2_log_get_bio(sdp, blkno); gfs2_log_write()
336 ret = bio_add_page(bio, page, size, offset); gfs2_log_write()
339 bio = gfs2_log_alloc_bio(sdp, blkno); gfs2_log_write()
340 ret = bio_add_page(bio, page, size, offset); gfs2_log_write()
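gfs2_log_write() above builds log I/O by appending pages to a cached bio and flushing it whenever the next block is not contiguous or bio_add_page() refuses the page. The sketch below compresses that flow around a hypothetical log_writer context (not a GFS2 structure), using only 4.1-era calls visible in this listing; sizing with BIO_MAX_PAGES is an assumption, since the excerpt does not show how gfs2_log_alloc_bio() computes nrvecs.

```c
#include <linux/bio.h>
#include <linux/fs.h>

struct log_writer {
	struct block_device *bdev;
	struct bio *bio;	/* currently open, partially built bio (or NULL) */
	bio_end_io_t *end_io;
	void *private;
};

static void log_flush(struct log_writer *lw, int rw)
{
	if (lw->bio) {
		submit_bio(rw, lw->bio);
		lw->bio = NULL;
	}
}

static void log_alloc(struct log_writer *lw, sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lw->bdev;
	bio->bi_end_io = lw->end_io;
	bio->bi_private = lw->private;
	lw->bio = bio;
}

/* Append one page of log data destined for @sector; flush when needed. */
static void log_write_page(struct log_writer *lw, struct page *page,
			   unsigned int size, unsigned int offset,
			   sector_t sector)
{
	if (lw->bio && sector != bio_end_sector(lw->bio))
		log_flush(lw, WRITE);	/* not contiguous: submit what we have */

	if (lw->bio && !bio_add_page(lw->bio, page, size, offset))
		log_flush(lw, WRITE);	/* bio full: submit, allocate below */

	if (!lw->bio) {
		log_alloc(lw, sector);
		/* a single page always fits a freshly allocated bio */
		bio_add_page(lw->bio, page, size, offset);
	}
}
```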
H A Dops_fstype.c174 static void end_bio_io_page(struct bio *bio, int error) end_bio_io_page() argument
176 struct page *page = bio->bi_private; end_bio_io_page()
214 * This uses the bio functions to read the super block from disk
232 struct bio *bio; gfs2_read_super() local
242 bio = bio_alloc(GFP_NOFS, 1); gfs2_read_super()
243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); gfs2_read_super()
244 bio->bi_bdev = sb->s_bdev; gfs2_read_super()
245 bio_add_page(bio, page, PAGE_SIZE, 0); gfs2_read_super()
247 bio->bi_end_io = end_bio_io_page; gfs2_read_super()
248 bio->bi_private = page; gfs2_read_super()
249 submit_bio(READ_SYNC | REQ_META, bio); gfs2_read_super()
251 bio_put(bio); gfs2_read_super()
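gfs2_read_super() above is the canonical single-page bio read: allocate a one-segment bio, point it at the device and sector, add the page, submit. Below is a self-contained sketch of the same pattern that uses submit_bio_wait() (seen elsewhere in this listing, e.g. in hfsplus and f2fs) instead of GFS2's private end_io callback; read_one_block() is an illustrative name, not a kernel function.

```c
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>

/* Read one PAGE_SIZE block at @sector and return the filled page. */
static struct page *read_one_block(struct block_device *bdev, sector_t sector)
{
	struct page *page;
	struct bio *bio;
	int err;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOFS, 1);		/* room for exactly one segment */
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* always fits an empty bio */

	err = submit_bio_wait(READ_SYNC | REQ_META, bio);
	bio_put(bio);

	if (err) {
		__free_page(page);
		return ERR_PTR(err);
	}
	return page;
}
```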
/linux-4.1.27/fs/jfs/
H A Djfs_metapage.c23 #include <linux/bio.h>
279 static void metapage_read_end_io(struct bio *bio, int err) metapage_read_end_io() argument
281 struct page *page = bio->bi_private; metapage_read_end_io()
283 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { metapage_read_end_io()
289 bio_put(bio); metapage_read_end_io()
334 static void metapage_write_end_io(struct bio *bio, int err) metapage_write_end_io() argument
336 struct page *page = bio->bi_private; metapage_write_end_io()
340 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { metapage_write_end_io()
345 bio_put(bio); metapage_write_end_io()
350 struct bio *bio = NULL; metapage_writepage() local
395 if (bio) { metapage_writepage()
404 if (bio_add_page(bio, page, bio_bytes, bio_offset) < metapage_writepage()
412 if (!bio->bi_iter.bi_size) metapage_writepage()
414 submit_bio(WRITE, bio); metapage_writepage()
416 bio = NULL; metapage_writepage()
432 bio = bio_alloc(GFP_NOFS, 1); metapage_writepage()
433 bio->bi_bdev = inode->i_sb->s_bdev; metapage_writepage()
434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); metapage_writepage()
435 bio->bi_end_io = metapage_write_end_io; metapage_writepage()
436 bio->bi_private = page; metapage_writepage()
445 if (bio) { metapage_writepage()
446 if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) metapage_writepage()
448 if (!bio->bi_iter.bi_size) metapage_writepage()
451 submit_bio(WRITE, bio); metapage_writepage()
471 print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16, metapage_writepage()
472 4, bio, sizeof(*bio), 0); metapage_writepage()
474 bio_put(bio); metapage_writepage()
486 struct bio *bio = NULL; metapage_readpage() local
508 if (bio) metapage_readpage()
509 submit_bio(READ, bio); metapage_readpage()
511 bio = bio_alloc(GFP_NOFS, 1); metapage_readpage()
512 bio->bi_bdev = inode->i_sb->s_bdev; metapage_readpage()
513 bio->bi_iter.bi_sector = metapage_readpage()
515 bio->bi_end_io = metapage_read_end_io; metapage_readpage()
516 bio->bi_private = page; metapage_readpage()
519 if (bio_add_page(bio, page, len, offset) < len) metapage_readpage()
525 if (bio) metapage_readpage()
526 submit_bio(READ, bio); metapage_readpage()
534 bio_put(bio); metapage_readpage()
H A Djfs_logmgr.c68 #include <linux/bio.h>
1987 struct bio *bio; lbmRead() local
1998 bio = bio_alloc(GFP_NOFS, 1); lbmRead()
2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmRead()
2001 bio->bi_bdev = log->bdev; lbmRead()
2002 bio->bi_io_vec[0].bv_page = bp->l_page; lbmRead()
2003 bio->bi_io_vec[0].bv_len = LOGPSIZE; lbmRead()
2004 bio->bi_io_vec[0].bv_offset = bp->l_offset; lbmRead()
2006 bio->bi_vcnt = 1; lbmRead()
2007 bio->bi_iter.bi_size = LOGPSIZE; lbmRead()
2009 bio->bi_end_io = lbmIODone; lbmRead()
2010 bio->bi_private = bp; lbmRead()
2013 bio->bi_iter.bi_size = 0; lbmRead()
2014 lbmIODone(bio, 0); lbmRead()
2016 submit_bio(READ_SYNC, bio); lbmRead()
2140 struct bio *bio; lbmStartIO() local
2145 bio = bio_alloc(GFP_NOFS, 1); lbmStartIO()
2146 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmStartIO()
2147 bio->bi_bdev = log->bdev; lbmStartIO()
2148 bio->bi_io_vec[0].bv_page = bp->l_page; lbmStartIO()
2149 bio->bi_io_vec[0].bv_len = LOGPSIZE; lbmStartIO()
2150 bio->bi_io_vec[0].bv_offset = bp->l_offset; lbmStartIO()
2152 bio->bi_vcnt = 1; lbmStartIO()
2153 bio->bi_iter.bi_size = LOGPSIZE; lbmStartIO()
2155 bio->bi_end_io = lbmIODone; lbmStartIO()
2156 bio->bi_private = bp; lbmStartIO()
2160 bio->bi_iter.bi_size = 0; lbmStartIO()
2161 lbmIODone(bio, 0); lbmStartIO()
2163 submit_bio(WRITE_SYNC, bio); lbmStartIO()
2199 static void lbmIODone(struct bio *bio, int error) lbmIODone() argument
2201 struct lbuf *bp = bio->bi_private; lbmIODone()
2215 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { lbmIODone()
2221 bio_put(bio); lbmIODone()
/linux-4.1.27/drivers/xen/
H A Dbiomerge.c1 #include <linux/bio.h>
/linux-4.1.27/drivers/block/
H A Dumem.c31 * 15May2002:NeilBrown - convert to bio for 2.5
39 #include <linux/bio.h>
110 struct bio *bio, *currentbio, **biotail; member in struct:cardinfo
119 struct bio *bio, **biotail; member in struct:cardinfo::mm_page
235 * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
330 page->bio = NULL; reset_page()
331 page->biotail = &page->bio; reset_page()
345 struct bio *bio; add_bio() local
349 bio = card->currentbio; add_bio()
350 if (!bio && card->bio) { add_bio()
351 card->currentbio = card->bio; add_bio()
352 card->current_iter = card->bio->bi_iter; add_bio()
353 card->bio = card->bio->bi_next; add_bio()
354 if (card->bio == NULL) add_bio()
355 card->biotail = &card->bio; add_bio()
359 if (!bio) add_bio()
362 rw = bio_rw(bio); add_bio()
366 vec = bio_iter_iovec(bio, card->current_iter); add_bio()
378 if (p->bio == NULL) add_bio()
380 if ((p->biotail) != &bio->bi_next) { add_bio()
381 *(p->biotail) = bio; add_bio()
382 p->biotail = &(bio->bi_next); add_bio()
383 bio->bi_next = NULL; add_bio()
406 bio_advance_iter(bio, &card->current_iter, vec.bv_len); add_bio()
422 struct bio *return_bio = NULL; process_page()
432 struct bio *bio = page->bio; process_page() local
444 vec = bio_iter_iovec(bio, page->iter); process_page()
445 bio_advance_iter(bio, &page->iter, vec.bv_len); process_page()
448 page->bio = bio->bi_next; process_page()
449 if (page->bio) process_page()
450 page->iter = page->bio->bi_iter; process_page()
459 clear_bit(BIO_UPTODATE, &bio->bi_flags); process_page()
465 } else if ((bio->bi_rw & REQ_WRITE) && process_page()
475 if (bio != page->bio) { process_page()
476 bio->bi_next = return_bio; process_page()
477 return_bio = bio; process_page()
504 struct bio *bio = return_bio; process_page() local
506 return_bio = bio->bi_next; process_page()
507 bio->bi_next = NULL; process_page()
508 bio_endio(bio, 0); process_page()
527 static void mm_make_request(struct request_queue *q, struct bio *bio) mm_make_request() argument
531 (unsigned long long)bio->bi_iter.bi_sector, mm_make_request()
532 bio->bi_iter.bi_size); mm_make_request()
535 *card->biotail = bio; mm_make_request()
536 bio->bi_next = NULL; mm_make_request()
537 card->biotail = &bio->bi_next; mm_make_request()
538 if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card)) mm_make_request()
890 card->bio = NULL; mm_pci_probe()
891 card->biotail = &card->bio; mm_pci_probe()
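umem.c above drives each bio with a private bvec_iter (card->current_iter) so it can stop after any segment and resume later, using bio_iter_iovec() and bio_advance_iter(). Here is a small sketch of that iteration with the per-segment work reduced to byte counting; drain_bio() is a made-up helper, not part of the driver.

```c
#include <linux/bio.h>

/*
 * Walk @bio segment by segment via the caller's iterator copy; the bio
 * itself is not modified, so progress can be suspended and resumed.
 */
static unsigned int drain_bio(struct bio *bio, struct bvec_iter *iter)
{
	unsigned int bytes = 0;

	while (iter->bi_size) {
		struct bio_vec vec = bio_iter_iovec(bio, *iter);

		/* real code would program one DMA descriptor per segment here */
		bytes += vec.bv_len;
		bio_advance_iter(bio, iter, vec.bv_len);
	}
	return bytes;
}
```

A caller starts with a copy, e.g. `struct bvec_iter it = bio->bi_iter;`, so the bio's own bi_iter stays untouched; that is what lets umem.c park a half-finished bio in card->current_iter and pick it up on the next pass.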
H A Dpktcdvd.c28 * Its data is defined by the struct packet_iosched and includes two bio
545 struct bio *bio = bio_kmalloc(GFP_KERNEL, 1); pkt_alloc_packet_data() local
546 if (!bio) pkt_alloc_packet_data()
549 pkt->r_bios[i] = bio; pkt_alloc_packet_data()
556 struct bio *bio = pkt->r_bios[i]; pkt_alloc_packet_data() local
557 if (bio) pkt_alloc_packet_data()
558 bio_put(bio); pkt_alloc_packet_data()
580 struct bio *bio = pkt->r_bios[i]; pkt_free_packet_data() local
581 if (bio) pkt_free_packet_data()
582 bio_put(bio); pkt_free_packet_data()
654 if (s <= tmp->bio->bi_iter.bi_sector) pkt_rbtree_find()
663 if (s > tmp->bio->bi_iter.bi_sector) { pkt_rbtree_find()
668 BUG_ON(s > tmp->bio->bi_iter.bi_sector); pkt_rbtree_find()
679 sector_t s = node->bio->bi_iter.bi_sector; pkt_rbtree_insert()
685 if (s < tmp->bio->bi_iter.bi_sector) pkt_rbtree_insert()
808 * Queue a bio for processing by the low-level CD device. Must be called
811 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) pkt_queue_bio() argument
814 if (bio_data_dir(bio) == READ) pkt_queue_bio()
815 bio_list_add(&pd->iosched.read_queue, bio); pkt_queue_bio()
817 bio_list_add(&pd->iosched.write_queue, bio); pkt_queue_bio()
848 struct bio *bio; pkt_iosched_process_queue() local
862 bio = bio_list_peek(&pd->iosched.write_queue); pkt_iosched_process_queue()
864 if (bio && (bio->bi_iter.bi_sector == pkt_iosched_process_queue()
887 bio = bio_list_pop(&pd->iosched.write_queue); pkt_iosched_process_queue()
889 bio = bio_list_pop(&pd->iosched.read_queue); pkt_iosched_process_queue()
892 if (!bio) pkt_iosched_process_queue()
895 if (bio_data_dir(bio) == READ) pkt_iosched_process_queue()
897 bio->bi_iter.bi_size >> 10; pkt_iosched_process_queue()
900 pd->iosched.last_write = bio_end_sector(bio); pkt_iosched_process_queue()
915 generic_make_request(bio); pkt_iosched_process_queue()
948 * a) The number of required segments for the write bio is minimized, which
979 static void pkt_end_io_read(struct bio *bio, int err) pkt_end_io_read() argument
981 struct packet_data *pkt = bio->bi_private; pkt_end_io_read()
985 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", pkt_end_io_read()
986 bio, (unsigned long long)pkt->sector, pkt_end_io_read()
987 (unsigned long long)bio->bi_iter.bi_sector, err); pkt_end_io_read()
998 static void pkt_end_io_packet_write(struct bio *bio, int err) pkt_end_io_packet_write() argument
1000 struct packet_data *pkt = bio->bi_private; pkt_end_io_packet_write()
1020 struct bio *bio; pkt_gather_data() local
1034 bio_list_for_each(bio, &pkt->orig_bios) { pkt_gather_data()
1035 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / pkt_gather_data()
1037 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; pkt_gather_data()
1061 bio = pkt->r_bios[f]; pkt_gather_data()
1062 bio_reset(bio); pkt_gather_data()
1063 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); pkt_gather_data()
1064 bio->bi_bdev = pd->bdev; pkt_gather_data()
1065 bio->bi_end_io = pkt_end_io_read; pkt_gather_data()
1066 bio->bi_private = pkt; pkt_gather_data()
1072 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) pkt_gather_data()
1076 bio->bi_rw = READ; pkt_gather_data()
1077 pkt_queue_bio(pd, bio); pkt_gather_data()
1157 bio_reset(pkt->bio); pkt_start_recovery()
1158 pkt->bio->bi_bdev = pd->bdev; pkt_start_recovery()
1159 pkt->bio->bi_rw = REQ_WRITE; pkt_start_recovery()
1160 pkt->bio->bi_iter.bi_sector = new_sector; pkt_start_recovery()
1161 pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; pkt_start_recovery()
1162 pkt->bio->bi_vcnt = pkt->frames; pkt_start_recovery()
1164 pkt->bio->bi_end_io = pkt_end_io_packet_write; pkt_start_recovery()
1165 pkt->bio->bi_private = pkt; pkt_start_recovery()
1197 struct bio *bio = NULL; pkt_handle_queue() local
1222 bio = node->bio; pkt_handle_queue()
1223 zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_handle_queue()
1226 bio = NULL; pkt_handle_queue()
1242 if (!bio) { pkt_handle_queue()
1243 pkt_dbg(2, pd, "no bio\n"); pkt_handle_queue()
1261 bio = node->bio; pkt_handle_queue()
1263 get_zone(bio->bi_iter.bi_sector, pd)); pkt_handle_queue()
1264 if (get_zone(bio->bi_iter.bi_sector, pd) != zone) pkt_handle_queue()
1268 bio_list_add(&pkt->orig_bios, bio); pkt_handle_queue()
1269 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; pkt_handle_queue()
1294 * Assemble a bio to write one packet and queue the bio for processing
1344 struct bio *bio; pkt_finish_packet() local
1350 while ((bio = bio_list_pop(&pkt->orig_bios))) pkt_finish_packet()
1351 bio_endio(bio, uptodate ? 0 : -EIO); pkt_finish_packet()
2334 static void pkt_end_io_read_cloned(struct bio *bio, int err) pkt_end_io_read_cloned() argument
2336 struct packet_stacked_data *psd = bio->bi_private; pkt_end_io_read_cloned()
2339 bio_put(bio); pkt_end_io_read_cloned()
2340 bio_endio(psd->bio, err); pkt_end_io_read_cloned()
2345 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) pkt_make_request_read() argument
2347 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO); pkt_make_request_read()
2351 psd->bio = bio; pkt_make_request_read()
2355 pd->stats.secs_r += bio_sectors(bio); pkt_make_request_read()
2359 static void pkt_make_request_write(struct request_queue *q, struct bio *bio) pkt_make_request_write() argument
2367 zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_make_request_write()
2371 * just append this bio to that packet. pkt_make_request_write()
2380 bio_list_add(&pkt->orig_bios, bio); pkt_make_request_write()
2382 bio->bi_iter.bi_size / CD_FRAMESIZE; pkt_make_request_write()
2400 * Test if there is enough room left in the bio work queue pkt_make_request_write()
2417 * No matching packet found. Store the bio in the work queue. pkt_make_request_write()
2420 node->bio = bio; pkt_make_request_write()
2443 static void pkt_make_request(struct request_queue *q, struct bio *bio) pkt_make_request() argument
2447 struct bio *split; pkt_make_request()
2452 bdevname(bio->bi_bdev, b)); pkt_make_request()
2457 (unsigned long long)bio->bi_iter.bi_sector, pkt_make_request()
2458 (unsigned long long)bio_end_sector(bio)); pkt_make_request()
2463 if (bio_data_dir(bio) == READ) { pkt_make_request()
2464 pkt_make_request_read(pd, bio); pkt_make_request()
2470 (unsigned long long)bio->bi_iter.bi_sector); pkt_make_request()
2474 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { pkt_make_request()
2475 pkt_err(pd, "wrong bio size\n"); pkt_make_request()
2479 blk_queue_bounce(q, &bio); pkt_make_request()
2482 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_make_request()
2483 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); pkt_make_request()
2488 split = bio_split(bio, last_zone - pkt_make_request()
2489 bio->bi_iter.bi_sector, pkt_make_request()
2491 bio_chain(split, bio); pkt_make_request()
2493 split = bio; pkt_make_request()
2497 } while (split != bio); pkt_make_request()
2501 bio_io_error(bio); pkt_make_request()
2516 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet pkt_merge_bvec()
2517 * boundary, pkt_make_request() will split the bio. pkt_merge_bvec()
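pkt_make_request() above cuts any bio that crosses a packet-zone boundary with bio_split() and ties each piece back to the parent with bio_chain(), looping until the remainder fits in one zone. The sketch below reproduces that loop under assumed names (ZONE_SECTORS, submit_by_zone(), handle_one()); using fs_bio_set as the split pool mirrors what pktcdvd does, but it is an assumption here because the excerpt truncates the argument list.

```c
#include <linux/bio.h>
#include <linux/gfp.h>

#define ZONE_SECTORS 128	/* example zone size: 64 KiB in 512-byte sectors */

static inline sector_t zone_of(sector_t sector)
{
	return sector & ~(sector_t)(ZONE_SECTORS - 1);
}

/* Hand each zone-sized piece of @bio to @handle_one, chaining the splits. */
static void submit_by_zone(struct bio *bio, void (*handle_one)(struct bio *))
{
	struct bio *split;

	do {
		sector_t zone = zone_of(bio->bi_iter.bi_sector);
		sector_t last_zone = zone_of(bio_end_sector(bio) - 1);

		if (last_zone != zone) {
			/* cut off everything up to the next zone boundary */
			split = bio_split(bio,
					  zone + ZONE_SECTORS -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);	/* parent completes after the piece */
		} else {
			split = bio;		/* the remainder fits in one zone */
		}
		handle_one(split);
	} while (split != bio);
}
```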
H A Dosdblk.c98 struct bio *bio; /* cloned bio */ member in struct:osdblk_request
252 static void bio_chain_put(struct bio *chain) bio_chain_put()
254 struct bio *tmp; bio_chain_put()
264 static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask) bio_chain_clone()
266 struct bio *tmp, *new_chain = NULL, *tail = NULL; bio_chain_clone()
303 struct bio *bio; osdblk_rq_fn() local
327 if (!do_flush) { /* osd_flush does not use a bio */ osdblk_rq_fn()
328 /* a bio clone to be passed down to OSD request */ osdblk_rq_fn()
329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC); osdblk_rq_fn()
330 if (!bio) osdblk_rq_fn()
333 bio = NULL; osdblk_rq_fn()
338 bio_chain_put(bio); osdblk_rq_fn()
345 orq->bio = bio; osdblk_rq_fn()
354 bio, blk_rq_bytes(rq)); osdblk_rq_fn()
357 bio, blk_rq_bytes(rq)); osdblk_rq_fn()
369 bio_chain_put(bio); osdblk_rq_fn()
H A Dpmem.c60 static void pmem_make_request(struct request_queue *q, struct bio *bio) pmem_make_request() argument
62 struct block_device *bdev = bio->bi_bdev; pmem_make_request()
70 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) { pmem_make_request()
75 BUG_ON(bio->bi_rw & REQ_DISCARD); pmem_make_request()
77 rw = bio_data_dir(bio); pmem_make_request()
78 sector = bio->bi_iter.bi_sector; bio_for_each_segment()
79 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
86 bio_endio(bio, err);
H A Dbrd.c16 #include <linux/bio.h>
297 * Process a single bvec of a bio.
326 static void brd_make_request(struct request_queue *q, struct bio *bio) brd_make_request() argument
328 struct block_device *bdev = bio->bi_bdev; brd_make_request()
336 sector = bio->bi_iter.bi_sector; brd_make_request()
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) brd_make_request()
340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { brd_make_request()
342 discard_from_brd(brd, sector, bio->bi_iter.bi_size); brd_make_request()
346 rw = bio_rw(bio); brd_make_request()
350 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
360 bio_endio(bio, err);
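brd and pmem above (like simdisk and axonram later in this listing) implement the bio-based make_request style: no request structs, just bio_for_each_segment() over the bio and one bio_endio() at the end. A minimal sketch of that shape against a flat memory buffer follows; ram_make_request() is an illustrative helper a real driver would call from its make_request_fn, and discard handling is omitted.

```c
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Copy each segment of @bio to/from a flat memory buffer, then complete it. */
static void ram_make_request(void *ram, size_t ram_bytes, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	sector_t sector = bio->bi_iter.bi_sector;
	int rw = bio_data_dir(bio);
	int err = 0;

	if (bio_end_sector(bio) > (ram_bytes >> 9)) {
		err = -EIO;			/* past end of device */
		goto out;
	}

	bio_for_each_segment(bvec, bio, iter) {
		void *mem = ram + ((size_t)sector << 9);
		void *buf = kmap_atomic(bvec.bv_page) + bvec.bv_offset;

		if (rw == READ)
			memcpy(buf, mem, bvec.bv_len);	/* device to page */
		else
			memcpy(mem, buf, bvec.bv_len);	/* page to device */

		kunmap_atomic(buf - bvec.bv_offset);
		sector += bvec.bv_len >> 9;
	}
out:
	bio_endio(bio, err);	/* 4.1-era two-argument completion, as in brd above */
}
```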
H A Dps3vram.c550 static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, ps3vram_do_bio() argument
551 struct bio *bio) ps3vram_do_bio()
554 int write = bio_data_dir(bio) == WRITE; ps3vram_do_bio()
556 loff_t offset = bio->bi_iter.bi_sector << 9; ps3vram_do_bio()
560 struct bio *next; ps3vram_do_bio()
562 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
596 bio_endio(bio, error);
600 static void ps3vram_make_request(struct request_queue *q, struct bio *bio) ps3vram_make_request() argument
610 bio_list_add(&priv->list, bio); ps3vram_make_request()
617 bio = ps3vram_do_bio(dev, bio); ps3vram_make_request()
618 } while (bio); ps3vram_make_request()
H A Dnull_blk.c17 struct bio *bio; member in struct:nullb_cmd
108 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
225 bio_endio(cmd->bio, 0); end_cmd()
313 static void null_queue_bio(struct request_queue *q, struct bio *bio) null_queue_bio() argument
320 cmd->bio = bio; null_queue_bio()
H A Dloop.h12 #include <linux/bio.h>
H A Dxen-blkfront.c83 struct bio *bio; member in struct:split_bio
1445 static void split_bio_end(struct bio *bio, int error) split_bio_end() argument
1447 struct split_bio *split_bio = bio->bi_private; split_bio_end()
1453 split_bio->bio->bi_phys_segments = 0; split_bio_end()
1454 bio_endio(split_bio->bio, split_bio->err); split_bio_end()
1457 bio_put(bio); split_bio_end()
1466 struct bio *bio, *cloned_bio; blkif_recover() local
1513 merge_bio.head = copy[i].request->bio; blkif_recover()
1516 copy[i].request->bio = NULL; blkif_recover()
1534 merge_bio.head = req->bio; blkif_recover()
1537 req->bio = NULL; blkif_recover()
1562 while ((bio = bio_list_pop(&bio_list)) != NULL) { blkif_recover()
1564 if (bio_segments(bio) > segs) { blkif_recover()
1566 * This bio has more segments than what we can blkif_recover()
1569 pending = (bio_segments(bio) + segs - 1) / segs; blkif_recover()
1573 split_bio->bio = bio; blkif_recover()
1577 (unsigned int)bio_sectors(bio) - offset); blkif_recover()
1578 cloned_bio = bio_clone(bio, GFP_NOIO); blkif_recover()
1587 * end, so we can also end the "parent" bio. blkif_recover()
1591 /* We don't need to split this bio */ blkif_recover()
1592 submit_bio(bio->bi_rw, bio); blkif_recover()
H A Drbd.c54 * number of contexts in Linux (blk, bio, genhd), but the default is
264 struct bio *bio_list;
877 /* The bio layer requires at least sector-sized I/O */ rbd_dev_ondisk_valid()
1231 * bio helpers
1234 static void bio_chain_put(struct bio *chain) bio_chain_put()
1236 struct bio *tmp; bio_chain_put()
1246 * zeros a bio chain, starting at specific offset
1248 static void zero_bio_chain(struct bio *chain, int start_ofs) zero_bio_chain()
1306 * Clone a portion of a bio, starting at the given byte offset
1309 static struct bio *bio_clone_range(struct bio *bio_src, bio_clone_range()
1314 struct bio *bio; bio_clone_range() local
1316 bio = bio_clone(bio_src, gfpmask); bio_clone_range()
1317 if (!bio) bio_clone_range()
1320 bio_advance(bio, offset); bio_clone_range()
1321 bio->bi_iter.bi_size = len; bio_clone_range()
1323 return bio; bio_clone_range()
1327 * Clone a portion of a bio chain, starting at the given byte offset
1328 * into the first bio in the source chain and continuing for the
1329 * number of bytes indicated. The result is another bio chain of
1333 * refer to the first source bio and the offset into that bio where
1336 * On return, bio_src is updated to refer to the bio in the source
1338 * contain the offset of that byte within that bio.
1340 static struct bio *bio_chain_clone_range(struct bio **bio_src, bio_chain_clone_range()
1345 struct bio *bi = *bio_src; bio_chain_clone_range()
1347 struct bio *chain = NULL; bio_chain_clone_range()
1348 struct bio **end; bio_chain_clone_range()
1358 struct bio *bio; bio_chain_clone_range() local
1362 goto out_err; /* EINVAL; ran out of bio's */ bio_chain_clone_range()
1365 bio = bio_clone_range(bi, off, bi_size, gfpmask); bio_chain_clone_range()
1366 if (!bio) bio_chain_clone_range()
1369 *end = bio; bio_chain_clone_range()
1370 end = &bio->bi_next; bio_chain_clone_range()
2417 * "data_desc" is the pointer to the head of a list of bio
2429 struct bio *bio_list = NULL; rbd_img_request_fill()
3427 rq->bio); rbd_queue_workfn()
3459 * a queue callback. Makes sure that we don't create a bio that spans across
3474 * bio start sector is to offset relative to the enclosing rbd_merge_bvec()
3483 * of the object. Account for what's already used by the bio. rbd_merge_bvec()
3492 * Don't send back more than was asked for. And if the bio rbd_merge_bvec()
3495 * added to an empty bio." rbd_merge_bvec()
H A Dvirtio_blk.c240 struct bio *bio; virtblk_get_id() local
243 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, virtblk_get_id()
245 if (IS_ERR(bio)) virtblk_get_id()
246 return PTR_ERR(bio); virtblk_get_id()
248 req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); virtblk_get_id()
250 bio_put(bio); virtblk_get_id()
H A Dhd.c467 insw(HD_DATA, bio_data(req->bio), 256); read_intr()
471 blk_rq_sectors(req) - 1, bio_data(req->bio)+512); read_intr()
508 outsw(HD_DATA, bio_data(req->bio), 256); write_intr()
627 cyl, head, sec, nsect, bio_data(req->bio)); hd_request()
646 outsw(HD_DATA, bio_data(req->bio), 256); hd_request()
H A Dps3disk.c104 dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n", rq_for_each_segment()
105 __func__, __LINE__, i, bio_sectors(iter.bio), rq_for_each_segment()
106 iter.bio->bi_iter.bi_sector); rq_for_each_segment()
H A Dfloppy.c180 #include <linux/bio.h>
2354 raw_cmd->kernel_data == bio_data(current_req->bio)) { rw_interrupt()
2373 base = bio_data(current_req->bio); buffer_chain_size()
2643 } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { make_raw_rw_request()
2657 ((unsigned long)bio_data(current_req->bio))) >> 9; make_raw_rw_request()
2661 if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) make_raw_rw_request()
2663 ((unsigned long)bio_data(current_req->bio)) % make_raw_rw_request()
2680 raw_cmd->kernel_data = bio_data(current_req->bio); make_raw_rw_request()
2734 (raw_cmd->kernel_data != bio_data(current_req->bio) && make_raw_rw_request()
2742 if (raw_cmd->kernel_data != bio_data(current_req->bio)) make_raw_rw_request()
2759 if (raw_cmd->kernel_data != bio_data(current_req->bio)) { make_raw_rw_request()
3774 static void floppy_rb0_cb(struct bio *bio, int err) floppy_rb0_cb() argument
3776 struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; floppy_rb0_cb()
3788 struct bio bio; __floppy_read_block_0() local
3806 bio_init(&bio); __floppy_read_block_0()
3807 bio.bi_io_vec = &bio_vec; __floppy_read_block_0()
3811 bio.bi_vcnt = 1; __floppy_read_block_0()
3812 bio.bi_iter.bi_size = size; __floppy_read_block_0()
3813 bio.bi_bdev = bdev; __floppy_read_block_0()
3814 bio.bi_iter.bi_sector = 0; __floppy_read_block_0()
3815 bio.bi_flags |= (1 << BIO_QUIET); __floppy_read_block_0()
3816 bio.bi_private = &cbdata; __floppy_read_block_0()
3817 bio.bi_end_io = floppy_rb0_cb; __floppy_read_block_0()
3819 submit_bio(READ, &bio); __floppy_read_block_0()
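__floppy_read_block_0() above shows a bio that never touches bio_alloc(): bio_init() on a stack bio plus a caller-supplied bio_vec is enough for a one-off, single-segment read. Below is a sketch of that pattern with a completion standing in for the floppy driver's private callback data; read_sector0() and one_shot_end_io() are illustrative names.

```c
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/fs.h>

static void one_shot_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);
}

/* Read @size bytes from sector 0 of @bdev into @page, waiting in place. */
static int read_sector0(struct block_device *bdev, struct page *page,
			unsigned int size)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio_vec bio_vec;
	struct bio bio;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;		/* single caller-provided segment */
	bio_vec.bv_page = page;
	bio_vec.bv_len = size;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_iter.bi_size = size;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = 0;
	bio.bi_private = &done;
	bio.bi_end_io = one_shot_end_io;

	submit_bio(READ, &bio);
	wait_for_completion(&done);

	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
```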
H A Dloop.c296 struct bio *bio; rq_for_each_segment() local
298 __rq_for_each_bio(bio, rq) rq_for_each_segment()
299 zero_fill_bio(bio); rq_for_each_segment()
344 struct bio *bio; rq_for_each_segment() local
346 __rq_for_each_bio(bio, rq) rq_for_each_segment()
347 zero_fill_bio(bio); rq_for_each_segment()
H A Dmg_disk.c482 u16 *buff = (u16 *)bio_data(req->bio); mg_read_one()
499 blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); mg_read()
517 u16 *buff = (u16 *)bio_data(req->bio); mg_write_one()
537 rem, blk_rq_pos(req), bio_data(req->bio)); mg_write()
588 blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); mg_read_intr()
627 blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); mg_write_intr()
H A Dz2ram.c90 void *buffer = bio_data(req->bio); do_z2_request()
/linux-4.1.27/drivers/scsi/
H A Dsd_dif.c112 struct bio *bio; sd_dif_prepare() local
124 __rq_for_each_bio(bio, scmd->request) { sd_dif_prepare()
125 struct bio_integrity_payload *bip = bio_integrity(bio); sd_dif_prepare()
163 struct bio *bio; sd_dif_complete() local
176 __rq_for_each_bio(bio, scmd->request) { sd_dif_complete()
177 struct bio_integrity_payload *bip = bio_integrity(bio); sd_dif_complete()
H A Dst.h32 struct bio *bio; member in struct:st_request
H A Dsd.c39 #include <linux/bio.h>
597 struct bio *bio = scmd->request->bio; sd_setup_protect_cmnd() local
602 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) sd_setup_protect_cmnd()
605 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) sd_setup_protect_cmnd()
612 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) sd_setup_protect_cmnd()
619 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) sd_setup_protect_cmnd()
820 struct bio *bio = rq->bio; sd_setup_write_same_cmnd() local
829 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); sd_setup_write_same_cmnd()
/linux-4.1.27/drivers/scsi/osd/
H A Dosd_initiator.c450 if (unlikely(rq->bio)) _put_request()
718 struct bio *bio; _osd_req_list_objects() local
725 WARN_ON(or->in.bio); _osd_req_list_objects()
726 bio = bio_map_kern(q, list, len, or->alloc_flags); _osd_req_list_objects()
727 if (IS_ERR(bio)) { _osd_req_list_objects()
729 return PTR_ERR(bio); _osd_req_list_objects()
732 bio->bi_rw &= ~REQ_WRITE; _osd_req_list_objects()
733 or->in.bio = bio; _osd_req_list_objects()
734 or->in.total_bytes = bio->bi_iter.bi_size; _osd_req_list_objects()
826 struct bio *bio, u64 len) osd_req_write()
829 WARN_ON(or->out.bio || or->out.total_bytes); osd_req_write()
830 WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); osd_req_write()
831 or->out.bio = bio; osd_req_write()
840 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); osd_req_write_kern() local
842 if (IS_ERR(bio)) osd_req_write_kern()
843 return PTR_ERR(bio); osd_req_write_kern()
845 bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ osd_req_write_kern()
846 osd_req_write(or, obj, offset, bio, len); osd_req_write_kern()
852 const struct osd_obj_id *, struct bio *data_out); */
854 const struct osd_obj_id *, struct bio *data_out, u64 offset); */
877 struct bio *bio, u64 len) osd_req_read()
880 WARN_ON(or->in.bio || or->in.total_bytes); osd_req_read()
881 WARN_ON(bio->bi_rw & REQ_WRITE); osd_req_read()
882 or->in.bio = bio; osd_req_read()
891 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); osd_req_read_kern() local
893 if (IS_ERR(bio)) osd_req_read_kern()
894 return PTR_ERR(bio); osd_req_read_kern()
896 osd_req_read(or, obj, offset, bio, len); osd_req_read_kern()
945 struct bio *bio; _osd_req_finalize_cdb_cont() local
956 /* create a bio for continuation segment */ _osd_req_finalize_cdb_cont()
957 bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, _osd_req_finalize_cdb_cont()
959 if (IS_ERR(bio)) _osd_req_finalize_cdb_cont()
960 return PTR_ERR(bio); _osd_req_finalize_cdb_cont()
962 bio->bi_rw |= REQ_WRITE; _osd_req_finalize_cdb_cont()
964 /* integrity check the continuation before the bio is linked _osd_req_finalize_cdb_cont()
968 osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key); _osd_req_finalize_cdb_cont()
973 * continuation bio to the head of the bio list - the _osd_req_finalize_cdb_cont()
977 bio->bi_next = or->out.bio; _osd_req_finalize_cdb_cont()
978 or->out.bio = bio; _osd_req_finalize_cdb_cont()
984 /* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
988 * total_bytes(sglist) >= total_bytes(bio)
991 const struct osd_obj_id *obj, struct bio *bio, osd_req_write_sg()
999 osd_req_write(or, obj, 0, bio, len); osd_req_write_sg()
1005 /* osd_req_read_sg: Read multiple extents of an object into @bio
1009 const struct osd_obj_id *obj, struct bio *bio, osd_req_read_sg()
1029 osd_req_read(or, obj, off, bio, len); osd_req_read_sg()
1043 static struct bio *_create_sg_bios(struct osd_request *or, _create_sg_bios()
1047 struct bio *bio; _create_sg_bios() local
1050 bio = bio_kmalloc(GFP_KERNEL, numentries); _create_sg_bios()
1051 if (unlikely(!bio)) { _create_sg_bios()
1063 added_len = bio_add_pc_page(q, bio, page, len, offset); _create_sg_bios()
1067 bio_put(bio); _create_sg_bios()
1072 return bio; _create_sg_bios()
1079 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); osd_req_write_sg_kern() local
1080 if (IS_ERR(bio)) osd_req_write_sg_kern()
1081 return PTR_ERR(bio); osd_req_write_sg_kern()
1083 bio->bi_rw |= REQ_WRITE; osd_req_write_sg_kern()
1084 osd_req_write_sg(or, obj, bio, sglist, numentries); osd_req_write_sg_kern()
1094 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); osd_req_read_sg_kern() local
1095 if (IS_ERR(bio)) osd_req_read_sg_kern()
1096 return PTR_ERR(bio); osd_req_read_sg_kern()
1098 osd_req_read_sg(or, obj, bio, sglist, numentries); osd_req_read_sg_kern()
1501 bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes, _osd_req_finalize_data_integrity()
1564 if (oii->bio) _make_request()
1565 return blk_make_request(q, oii->bio, flags); _make_request()
1630 struct bio *out_data_bio = or->out.bio; osd_finalize_request()
1645 has_in = or->in.bio || or->get_attr.total_bytes; osd_finalize_request()
1646 has_out = or->out.bio || or->cdb_cont.total_bytes || osd_finalize_request()
2027 struct bio *bio __unused, const u8 *cap_key __unused) osd_sec_sign_data()
824 osd_req_write(struct osd_request *or, const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len) osd_req_write() argument
875 osd_req_read(struct osd_request *or, const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len) osd_req_read() argument
990 osd_req_write_sg(struct osd_request *or, const struct osd_obj_id *obj, struct bio *bio, const struct osd_sg_entry *sglist, unsigned numentries) osd_req_write_sg() argument
1008 osd_req_read_sg(struct osd_request *or, const struct osd_obj_id *obj, struct bio *bio, const struct osd_sg_entry *sglist, unsigned numentries) osd_req_read_sg() argument
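osd_req_write_kern()/osd_req_read_kern() above wrap a kernel buffer with bio_map_kern(), set REQ_WRITE for writes, and let the request layer take over. The sketch below strings those steps together, borrowing blk_make_request() for the hand-off as virtio_blk's virtblk_get_id() does elsewhere in this listing; kern_buf_to_request() is a made-up helper, not an OSD function.

```c
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct request *kern_buf_to_request(struct request_queue *q, void *buf,
					   unsigned int len, bool is_write)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	struct request *rq;

	if (IS_ERR(bio))
		return ERR_CAST(bio);

	if (is_write)
		bio->bi_rw |= REQ_WRITE;	/* bio_map_kern() maps as a read */

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		bio_put(bio);			/* as virtblk_get_id() does on failure */

	return rq;
}
```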
/linux-4.1.27/drivers/block/aoe/
H A Daoecmd.c297 skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) skb_fillup() argument
302 __bio_for_each_segment(bv, bio, iter, iter) skb_fillup()
352 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { ata_rw_frameinit()
353 skb_fillup(skb, f->buf->bio, f->iter); ata_rw_frameinit()
389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size); aoecmd_ata_rw()
848 struct bio *bio; rqbiocnt() local
851 __rq_for_each_bio(bio, r) rqbiocnt()
867 bio_pageinc(struct bio *bio) bio_pageinc() argument
873 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
883 bio_pagedec(struct bio *bio) bio_pagedec() argument
889 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
896 bufinit(struct buf *buf, struct request *rq, struct bio *bio) bufinit() argument
900 buf->bio = bio; bufinit()
901 buf->iter = bio->bi_iter; bufinit()
902 bio_pageinc(bio); bufinit()
911 struct bio *bio; nextbuf() local
925 d->ip.nxbio = rq->bio; nextbuf()
933 bio = d->ip.nxbio; nextbuf()
934 bufinit(buf, rq, bio); nextbuf()
935 bio = bio->bi_next; nextbuf()
936 d->ip.nxbio = bio; nextbuf()
937 if (bio == NULL) nextbuf()
1087 bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt) bvcpy() argument
1094 __bio_for_each_segment(bv, bio, iter, iter) { __bio_for_each_segment()
1104 struct bio *bio; aoe_end_request() local
1112 bio = rq->bio; aoe_end_request()
1113 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); aoe_end_request()
1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); aoe_end_request()
1130 bio_pagedec(buf->bio); aoe_end_buf()
1175 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); ktiocomplete()
1188 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); ktiocomplete()
1196 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); ktiocomplete()
1199 bvcpy(skb, f->buf->bio, f->iter, n); ktiocomplete()
1698 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); aoe_failbuf()
H A Daoedev.c164 struct bio *bio; aoe_failip() local
172 while ((bio = d->ip.nxbio)) { aoe_failip()
173 clear_bit(BIO_UPTODATE, &bio->bi_flags); aoe_failip()
174 d->ip.nxbio = bio->bi_next; aoe_failip()
H A Daoe.h103 struct bio *bio; member in struct:buf
176 struct bio *nxbio;
/linux-4.1.27/arch/xtensa/platforms/iss/
H A Dsimdisk.c18 #include <linux/bio.h>
104 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) simdisk_xfer_bio() argument
108 sector_t sector = bio->bi_iter.bi_sector; simdisk_xfer_bio()
110 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
111 char *buffer = __bio_kmap_atomic(bio, iter); bio_for_each_segment()
115 bio_data_dir(bio) == WRITE); bio_for_each_segment()
122 static void simdisk_make_request(struct request_queue *q, struct bio *bio) simdisk_make_request() argument
125 int status = simdisk_xfer_bio(dev, bio); simdisk_make_request()
126 bio_endio(bio, status); simdisk_make_request()
/linux-4.1.27/drivers/s390/block/
H A Dxpram.c40 #include <linux/bio.h>
184 static void xpram_make_request(struct request_queue *q, struct bio *bio) xpram_make_request() argument
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; xpram_make_request()
193 if ((bio->bi_iter.bi_sector & 7) != 0 || xpram_make_request()
194 (bio->bi_iter.bi_size & 4095) != 0) xpram_make_request()
197 if ((bio->bi_iter.bi_size >> 12) > xdev->size) xpram_make_request()
200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) xpram_make_request()
202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; bio_for_each_segment()
203 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
211 if (bio_data_dir(bio) == READ) { bio_for_each_segment()
223 set_bit(BIO_UPTODATE, &bio->bi_flags);
224 bio_endio(bio, 0);
227 bio_io_error(bio);
H A Ddcssblk.c30 static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
819 dcssblk_make_request(struct request_queue *q, struct bio *bio) dcssblk_make_request() argument
830 dev_info = bio->bi_bdev->bd_disk->private_data; dcssblk_make_request()
833 if ((bio->bi_iter.bi_sector & 7) != 0 || dcssblk_make_request()
834 (bio->bi_iter.bi_size & 4095) != 0) dcssblk_make_request()
837 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { dcssblk_make_request()
848 if (bio_data_dir(bio) == WRITE) { dcssblk_make_request()
857 index = (bio->bi_iter.bi_sector >> 3); bio_for_each_segment()
858 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
865 if (bio_data_dir(bio) == READ) { bio_for_each_segment()
874 bio_endio(bio, 0);
877 bio_io_error(bio);
H A Ddasd_diag.c17 #include <linux/bio.h>
60 struct dasd_diag_bio bio[0]; member in struct:dasd_diag_req
184 private->iob.bio_list = dreq->bio; dasd_start_diag()
319 struct dasd_diag_bio bio; dasd_diag_check_device() local
402 memset(&bio, 0, sizeof (struct dasd_diag_bio)); dasd_diag_check_device()
403 bio.type = MDSK_READ_REQ; dasd_diag_check_device()
404 bio.block_number = private->pt_block + 1; dasd_diag_check_device()
405 bio.buffer = label; dasd_diag_check_device()
412 private->iob.bio_list = &bio; dasd_diag_check_device()
525 /* Check struct bio and count the number of blocks for the request. */ dasd_diag_build_cp()
545 dbio = dreq->bio;
/linux-4.1.27/arch/powerpc/sysdev/
H A Daxonram.c26 #include <linux/bio.h>
104 * @queue, @bio: see blk_queue_make_request()
107 axon_ram_make_request(struct request_queue *queue, struct bio *bio) axon_ram_make_request() argument
109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; axon_ram_make_request()
116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << axon_ram_make_request()
120 bio_for_each_segment(vec, bio, iter) { bio_for_each_segment()
122 bio_io_error(bio); bio_for_each_segment()
127 if (bio_data_dir(bio) == READ) bio_for_each_segment()
135 bio_endio(bio, 0);
/linux-4.1.27/kernel/trace/
H A Dblktrace.c709 * Records an action against a request. Will log the bio offset + size.
765 * blk_add_trace_bio - Add a trace for a bio oriented action
767 * @bio: the source bio
772 * Records an action against a bio. Will log the bio offset + size.
775 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, blk_add_trace_bio() argument
783 if (!error && !bio_flagged(bio, BIO_UPTODATE)) blk_add_trace_bio()
786 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, blk_add_trace_bio()
787 bio->bi_rw, what, error, 0, NULL); blk_add_trace_bio()
791 struct request_queue *q, struct bio *bio) blk_add_trace_bio_bounce()
793 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); blk_add_trace_bio_bounce()
797 struct request_queue *q, struct bio *bio, blk_add_trace_bio_complete()
800 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); blk_add_trace_bio_complete()
806 struct bio *bio) blk_add_trace_bio_backmerge()
808 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); blk_add_trace_bio_backmerge()
814 struct bio *bio) blk_add_trace_bio_frontmerge()
816 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); blk_add_trace_bio_frontmerge()
820 struct request_queue *q, struct bio *bio) blk_add_trace_bio_queue()
822 blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); blk_add_trace_bio_queue()
827 struct bio *bio, int rw) blk_add_trace_getrq()
829 if (bio) blk_add_trace_getrq()
830 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); blk_add_trace_getrq()
842 struct bio *bio, int rw) blk_add_trace_sleeprq()
844 if (bio) blk_add_trace_sleeprq()
845 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); blk_add_trace_sleeprq()
882 struct request_queue *q, struct bio *bio, blk_add_trace_split()
890 __blk_add_trace(bt, bio->bi_iter.bi_sector, blk_add_trace_split()
891 bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, blk_add_trace_split()
892 !bio_flagged(bio, BIO_UPTODATE), blk_add_trace_split()
898 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
901 * @bio: the source bio
906 * Device mapper or raid target sometimes need to split a bio because
911 struct request_queue *q, struct bio *bio, blk_add_trace_bio_remap()
921 r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); blk_add_trace_bio_remap()
924 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, blk_add_trace_bio_remap()
925 bio->bi_rw, BLK_TA_REMAP, blk_add_trace_bio_remap()
926 !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); blk_add_trace_bio_remap()
790 blk_add_trace_bio_bounce(void *ignore, struct request_queue *q, struct bio *bio) blk_add_trace_bio_bounce() argument
796 blk_add_trace_bio_complete(void *ignore, struct request_queue *q, struct bio *bio, int error) blk_add_trace_bio_complete() argument
803 blk_add_trace_bio_backmerge(void *ignore, struct request_queue *q, struct request *rq, struct bio *bio) blk_add_trace_bio_backmerge() argument
811 blk_add_trace_bio_frontmerge(void *ignore, struct request_queue *q, struct request *rq, struct bio *bio) blk_add_trace_bio_frontmerge() argument
819 blk_add_trace_bio_queue(void *ignore, struct request_queue *q, struct bio *bio) blk_add_trace_bio_queue() argument
825 blk_add_trace_getrq(void *ignore, struct request_queue *q, struct bio *bio, int rw) blk_add_trace_getrq() argument
840 blk_add_trace_sleeprq(void *ignore, struct request_queue *q, struct bio *bio, int rw) blk_add_trace_sleeprq() argument
881 blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) blk_add_trace_split() argument
910 blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) blk_add_trace_bio_remap() argument
/linux-4.1.27/arch/m68k/emu/
H A Dnfblock.c62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio) nfhd_make_request() argument
68 sector_t sec = bio->bi_iter.bi_sector; nfhd_make_request()
70 dir = bio_data_dir(bio); nfhd_make_request()
72 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
79 bio_endio(bio, 0);
/linux-4.1.27/include/linux/ceph/
H A Dmessenger.h78 CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
103 struct bio *bio; member in struct:ceph_msg_data::__anon11535::__anon11536
126 struct { /* bio */
127 struct bio *bio; /* bio from list */ member in struct:ceph_msg_data_cursor::__anon11538::__anon11539
291 extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
H A Dosd_client.h70 struct bio *bio; /* list of bios */ member in struct:ceph_osd_data::__anon11543::__anon11545
285 struct bio *bio, size_t bio_length);
/linux-4.1.27/fs/hfsplus/
H A Dwrapper.c49 struct bio *bio; hfsplus_submit_bio() local
65 bio = bio_alloc(GFP_NOIO, 1); hfsplus_submit_bio()
66 bio->bi_iter.bi_sector = sector; hfsplus_submit_bio()
67 bio->bi_bdev = sb->s_bdev; hfsplus_submit_bio()
77 ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); hfsplus_submit_bio()
86 ret = submit_bio_wait(rw, bio); hfsplus_submit_bio()
88 bio_put(bio); hfsplus_submit_bio()
/linux-4.1.27/include/scsi/
H A Dosd_sec.h40 void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
H A Dosd_initiator.h143 struct bio *bio; member in struct:osd_request::_osd_io_info
430 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
434 const struct osd_obj_id *, struct bio *data_out);/* NI */
436 const struct osd_obj_id *, struct bio *data_out, u64 offset);/* NI */
447 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
453 const struct osd_obj_id *obj, struct bio *bio,
456 const struct osd_obj_id *obj, struct bio *bio,
H A Dosd_ore.h158 struct bio *bio; member in struct:ore_io_state::ore_per_dev_state
/linux-4.1.27/fs/exofs/
H A Dore.c47 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
328 if (per_dev->bio) ore_put_io_state()
329 bio_put(per_dev->bio); ore_put_io_state()
405 static void _clear_bio(struct bio *bio) _clear_bio() argument
410 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
440 per_dev->bio) { ore_check_io()
442 * Note: if we do not have bio it means read-attributes ore_check_io()
445 _clear_bio(per_dev->bio); ore_check_io()
603 if (per_dev->bio == NULL) { _ore_add_stripe_unit()
615 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size); _ore_add_stripe_unit()
616 if (unlikely(!per_dev->bio)) { _ore_add_stripe_unit()
630 added_len = bio_add_pc_page(q, per_dev->bio, pages[pg], _ore_add_stripe_unit()
636 per_dev->bio->bi_vcnt, _ore_add_stripe_unit()
637 per_dev->bio->bi_max_vecs, _ore_add_stripe_unit()
654 * bio than the CDB requested length (per_dev->length). That's fine _ore_add_stripe_unit()
859 struct bio *bio; _write_mirror() local
862 bio = bio_clone_kmalloc(master_dev->bio, _write_mirror()
864 if (unlikely(!bio)) { _write_mirror()
867 master_dev->bio->bi_max_vecs); _write_mirror()
872 bio->bi_bdev = NULL; _write_mirror()
873 bio->bi_next = NULL; _write_mirror()
876 per_dev->bio = bio; _write_mirror()
879 bio = master_dev->bio; _write_mirror()
881 bio->bi_rw |= REQ_WRITE; _write_mirror()
885 per_dev->offset, bio, per_dev->length); _write_mirror()
983 osd_req_read_sg(or, obj, per_dev->bio, _ore_read_mirror()
988 per_dev->bio, per_dev->length); _ore_read_mirror()
H A Dore_raid.c360 per_dev->bio = bio_kmalloc(GFP_KERNEL, _add_to_r4w()
362 if (unlikely(!per_dev->bio)) { _add_to_r4w()
375 added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len, _add_to_r4w()
379 per_dev->bio->bi_vcnt); _add_to_r4w()
435 struct bio *bio = ios->per_dev[d].bio; _mark_read4write_pages_uptodate() local
437 if (!bio) _mark_read4write_pages_uptodate()
440 bio_for_each_segment_all(bv, bio, i) { bio_for_each_segment_all()
599 /* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change _read_4_write_execute()
600 * to check for per_dev->bio _read_4_write_execute()
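exofs above sizes a bio with bio_kmalloc() and fills it with bio_add_pc_page(), which honours the target queue's limits and may therefore accept fewer bytes than requested (ore.c's comment near line 654 notes the resulting short bio). Here is a sketch of that fill loop; pages_to_pc_bio() is an assumed helper, not an exofs function.

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Build a "pc" bio from @pages, stopping early if the queue limits bite. */
static struct bio *pages_to_pc_bio(struct request_queue *q, struct page **pages,
				   unsigned int nr_pages,
				   unsigned int *added_bytes)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nr_pages);
	unsigned int i;

	*added_bytes = 0;
	if (unlikely(!bio))
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		unsigned int added = bio_add_pc_page(q, bio, pages[i],
						     PAGE_SIZE, 0);

		*added_bytes += added;
		if (added != PAGE_SIZE)
			break;		/* queue limit reached: return a shorter bio */
	}
	return bio;
}
```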
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_req.h260 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src) drbd_req_make_private_bio()
262 struct bio *bio; drbd_req_make_private_bio() local
263 bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ drbd_req_make_private_bio()
265 req->private_bio = bio; drbd_req_make_private_bio()
267 bio->bi_private = req; drbd_req_make_private_bio()
268 bio->bi_end_io = drbd_request_endio; drbd_req_make_private_bio()
269 bio->bi_next = NULL; drbd_req_make_private_bio()
274 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
276 struct bio *bio; member in struct:bio_and_error
306 if (m.bio) _req_mod()
312 /* completion of master bio is outside of our spinlock.
328 if (m.bio) req_mod()
H A Ddrbd_req.c51 struct bio *bio_src) drbd_req_new()
204 bio_endio(m->bio, m->error); complete_master_bio()
210 * Set m->bio to the master bio, if it is fit to be completed,
213 * If m->bio is set, the error status to be returned is placed in m->error.
223 /* we must not complete the master bio, while it is drbd_req_complete()
295 m->bio = req->master_bio; drbd_req_complete()
587 m->bio = NULL; __req_mod()
1140 struct bio *bio = req->private_bio; drbd_submit_req_private_bio() local
1141 const int rw = bio_rw(bio); drbd_submit_req_private_bio()
1143 bio->bi_bdev = device->ldev->backing_bdev; drbd_submit_req_private_bio()
1149 * this bio. */ drbd_submit_req_private_bio()
1156 bio_endio(bio, -EIO); drbd_submit_req_private_bio()
1158 generic_make_request(bio); drbd_submit_req_private_bio()
1161 bio_endio(bio, -EIO); drbd_submit_req_private_bio()
1182 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif) drbd_request_prepare() argument
1184 const int rw = bio_data_dir(bio); drbd_request_prepare()
1188 req = drbd_req_new(device, bio); drbd_request_prepare()
1194 bio_endio(bio, -ENOMEM); drbd_request_prepare()
1314 * As long as we still need to submit our private bio, drbd_send_and_submit()
1316 * If however this request did not even have a private bio to submit drbd_send_and_submit()
1321 if (m.bio) drbd_send_and_submit()
1325 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif) __drbd_make_request() argument
1327 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); __drbd_make_request()
1495 void drbd_make_request(struct request_queue *q, struct bio *bio) drbd_make_request() argument
1505 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); drbd_make_request()
1508 __drbd_make_request(device, bio, start_jif); drbd_make_request()
1518 * We also may need to enforce configured max-bio-bvecs limits.
H A Ddrbd_actlog.c142 struct bio *bio; _drbd_md_sync_page_io() local
154 bio = bio_alloc_drbd(GFP_NOIO); _drbd_md_sync_page_io()
155 bio->bi_bdev = bdev->md_bdev; _drbd_md_sync_page_io()
156 bio->bi_iter.bi_sector = sector; _drbd_md_sync_page_io()
158 if (bio_add_page(bio, device->md_io.page, size, 0) != size) _drbd_md_sync_page_io()
160 bio->bi_private = device; _drbd_md_sync_page_io()
161 bio->bi_end_io = drbd_md_endio; _drbd_md_sync_page_io()
162 bio->bi_rw = rw; _drbd_md_sync_page_io()
174 bio_get(bio); /* one bio_put() is in the completion handler */ _drbd_md_sync_page_io()
178 bio_endio(bio, -EIO); _drbd_md_sync_page_io()
180 submit_bio(rw, bio); _drbd_md_sync_page_io()
182 if (bio_flagged(bio, BIO_UPTODATE)) _drbd_md_sync_page_io()
186 bio_put(bio); _drbd_md_sync_page_io()
H A Ddrbd_worker.c68 void drbd_md_endio(struct bio *bio, int error) drbd_md_endio() argument
72 device = bio->bi_private; drbd_md_endio()
89 bio_put(bio); drbd_md_endio()
173 void drbd_peer_request_endio(struct bio *bio, int error) drbd_peer_request_endio() argument
175 struct drbd_peer_request *peer_req = bio->bi_private; drbd_peer_request_endio()
177 int uptodate = bio_flagged(bio, BIO_UPTODATE); drbd_peer_request_endio()
178 int is_write = bio_data_dir(bio) == WRITE; drbd_peer_request_endio()
179 int is_discard = !!(bio->bi_rw & REQ_DISCARD); drbd_peer_request_endio()
200 bio_put(bio); /* no need for the bio anymore */ drbd_peer_request_endio()
211 void drbd_request_endio(struct bio *bio, int error) drbd_request_endio() argument
214 struct drbd_request *req = bio->bi_private; drbd_request_endio()
218 int uptodate = bio_flagged(bio, BIO_UPTODATE); drbd_request_endio()
222 bio_data_dir(bio) == WRITE ? "write" : "read"); drbd_request_endio()
268 if (bio->bi_rw & REQ_DISCARD) drbd_request_endio()
273 what = (bio_data_dir(bio) == WRITE) drbd_request_endio()
275 : (bio_rw(bio) == READ) drbd_request_endio()
290 if (m.bio) drbd_request_endio()
321 void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest) drbd_csum_bio() argument
334 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
H A Ddrbd_int.h299 * if local IO _is_ allowed, holds the locally submitted bio clone,
302 struct bio *private_bio;
317 struct bio *master_bio; /* master bio pointer */
346 * how long did it take to complete the master bio
349 * how long the master bio was blocked until we finally allocated
1420 /* We also need to make sure we get a bio
1424 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1449 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1450 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
1483 extern void drbd_md_endio(struct bio *bio, int error);
1484 extern void drbd_peer_request_endio(struct bio *bio, int error);
1485 extern void drbd_request_endio(struct bio *bio, int error);
1514 extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1598 * used to submit our private bio
1601 int fault_type, struct bio *bio) drbd_generic_make_request()
1604 if (!bio->bi_bdev) { drbd_generic_make_request()
1605 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); drbd_generic_make_request()
1606 bio_endio(bio, -ENODEV); drbd_generic_make_request()
1611 bio_endio(bio, -EIO); drbd_generic_make_request()
1613 generic_make_request(bio); drbd_generic_make_request()
1600 drbd_generic_make_request(struct drbd_device *device, int fault_type, struct bio *bio) drbd_generic_make_request() argument
H A Ddrbd_bitmap.c944 static void drbd_bm_endio(struct bio *bio, int error) drbd_bm_endio() argument
946 struct drbd_bm_aio_ctx *ctx = bio->bi_private; drbd_bm_endio()
949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); drbd_bm_endio()
950 int uptodate = bio_flagged(bio, BIO_UPTODATE); drbd_bm_endio()
982 mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); drbd_bm_endio()
984 bio_put(bio); drbd_bm_endio()
995 struct bio *bio = bio_alloc_drbd(GFP_NOIO); __must_hold() local
1024 bio->bi_bdev = device->ldev->md_bdev; __must_hold()
1025 bio->bi_iter.bi_sector = on_disk_sector; __must_hold()
1026 /* bio_add_page of a single page to an empty bio will always succeed, __must_hold()
1028 bio_add_page(bio, page, len, 0); __must_hold()
1029 bio->bi_private = ctx; __must_hold()
1030 bio->bi_end_io = drbd_bm_endio; __must_hold()
1033 bio->bi_rw |= rw; __must_hold()
1034 bio_endio(bio, -EIO); __must_hold()
1036 submit_bio(rw, bio); __must_hold()
H A Ddrbd_receiver.c1353 * @rw: flag field, see bio->bi_rw
1361 * single page to an empty bio (which should never happen and likely indicates
1370 struct bio *bios = NULL; drbd_submit_peer_request()
1371 struct bio *bio; drbd_submit_peer_request() local
1403 /* In most cases, we will only need one bio. But in case the lower drbd_submit_peer_request()
1406 * request in more than one bio. drbd_submit_peer_request()
1409 * generated bio, but a bio allocated on behalf of the peer. drbd_submit_peer_request()
1412 bio = bio_alloc(GFP_NOIO, nr_pages); drbd_submit_peer_request()
1413 if (!bio) { drbd_submit_peer_request()
1414 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); drbd_submit_peer_request()
1417 /* > peer_req->i.sector, unless this is the first bio */ drbd_submit_peer_request()
1418 bio->bi_iter.bi_sector = sector; drbd_submit_peer_request()
1419 bio->bi_bdev = device->ldev->backing_bdev; drbd_submit_peer_request()
1420 bio->bi_rw = rw; drbd_submit_peer_request()
1421 bio->bi_private = peer_req; drbd_submit_peer_request()
1422 bio->bi_end_io = drbd_peer_request_endio; drbd_submit_peer_request()
1424 bio->bi_next = bios; drbd_submit_peer_request()
1425 bios = bio; drbd_submit_peer_request()
1429 bio->bi_iter.bi_size = data_size; drbd_submit_peer_request()
1435 if (!bio_add_page(bio, page, len, 0)) { page_chain_for_each()
1439 if (bio->bi_vcnt == 0) { page_chain_for_each()
1443 len, (uint64_t)bio->bi_iter.bi_sector); page_chain_for_each()
1462 bio = bios;
1464 bio->bi_next = NULL;
1466 drbd_generic_make_request(device, fault_type, bio);
1472 bio = bios;
1474 bio_put(bio);
1710 struct bio *bio; recv_dless_read() local
1728 bio = req->master_bio; recv_dless_read()
1729 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); recv_dless_read()
1731 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
1742 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
2154 if (m.bio) fail_postponed_requests()
5207 if (m.bio) validate_req_change_req_state()
5285 The master bio might already be completed, therefore the got_NegAck()
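drbd_submit_peer_request() above may need several bios for one peer request: it keeps calling bio_add_page() until the current bio refuses a page, then allocates another and links it through bi_next. The sketch below isolates just that chain-building step, leaving out submission and DRBD's per-request completion counting; build_bio_chain() is an illustrative name.

```c
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static struct bio *build_bio_chain(struct block_device *bdev, sector_t sector,
				   unsigned long rw, struct page **pages,
				   unsigned int nr_pages,
				   bio_end_io_t *end_io, void *private)
{
	struct bio *bios = NULL;	/* head of the chain, linked via bi_next */
	struct bio *bio = NULL;
	unsigned int i = 0;

	while (i < nr_pages) {
		if (!bio) {
			bio = bio_alloc(GFP_NOIO, min_t(unsigned int,
							nr_pages - i,
							BIO_MAX_PAGES));
			if (!bio)
				goto fail;
			bio->bi_iter.bi_sector = sector;	/* where this bio starts */
			bio->bi_bdev = bdev;
			bio->bi_rw = rw;
			bio->bi_end_io = end_io;
			bio->bi_private = private;
			bio->bi_next = bios;			/* push onto the chain */
			bios = bio;
		}
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0)) {
			if (bio->bi_vcnt == 0)
				goto fail;	/* one page must fit an empty bio */
			bio = NULL;		/* current bio is full: start another */
			continue;
		}
		sector += PAGE_SIZE >> 9;
		i++;
	}
	return bios;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return NULL;
}
```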
H A Ddrbd_main.c149 struct bio *bio_alloc_drbd(gfp_t gfp_mask) bio_alloc_drbd()
151 struct bio *bio; bio_alloc_drbd() local
156 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); bio_alloc_drbd()
157 if (!bio) bio_alloc_drbd()
159 return bio; bio_alloc_drbd()
1548 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio) _drbd_send_bio() argument
1554 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
1567 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio) _drbd_send_zc_bio() argument
1573 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
1672 * sure that even if the bio pages may still be modified, it drbd_send_dblock()
2212 /* One global retry thread, if we need to push back some bio and have it
2235 struct bio *bio = req->master_bio; do_retry() local
2271 __drbd_make_request(device, bio, start_jif); do_retry()
H A Ddrbd_protocol.h129 u32 size; /* == bio->bi_size */
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_host_smp.c239 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || sas_smp_host_handler()
240 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { sas_smp_host_handler()
258 buf = kmap_atomic(bio_page(req->bio)); sas_smp_host_handler()
260 kunmap_atomic(buf - bio_offset(req->bio)); sas_smp_host_handler()
373 buf = kmap_atomic(bio_page(rsp->bio)); sas_smp_host_handler()
375 flush_kernel_dcache_page(bio_page(rsp->bio)); sas_smp_host_handler()
376 kunmap_atomic(buf - bio_offset(rsp->bio)); sas_smp_host_handler()
/linux-4.1.27/fs/f2fs/
H A Ddata.c18 #include <linux/bio.h>
31 static void f2fs_read_end_io(struct bio *bio, int err) f2fs_read_end_io() argument
36 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
47 bio_put(bio);
50 static void f2fs_write_end_io(struct bio *bio, int err) f2fs_write_end_io() argument
52 struct f2fs_sb_info *sbi = bio->bi_private; f2fs_write_end_io()
56 bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all()
72 bio_put(bio);
78 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, __bio_alloc()
81 struct bio *bio; __bio_alloc() local
83 /* No failure on bio allocation */ __bio_alloc()
84 bio = bio_alloc(GFP_NOIO, npages); __bio_alloc()
86 bio->bi_bdev = sbi->sb->s_bdev; __bio_alloc()
87 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); __bio_alloc()
88 bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io; __bio_alloc()
89 bio->bi_private = sbi; __bio_alloc()
91 return bio; __bio_alloc()
98 if (!io->bio) __submit_merged_bio()
102 trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); __submit_merged_bio()
104 trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); __submit_merged_bio()
106 submit_bio(fio->rw, io->bio); __submit_merged_bio()
107 io->bio = NULL; __submit_merged_bio()
139 struct bio *bio; f2fs_submit_page_bio() local
144 /* Allocate a new bio */ f2fs_submit_page_bio()
145 bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw)); f2fs_submit_page_bio()
147 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { f2fs_submit_page_bio()
148 bio_put(bio); f2fs_submit_page_bio()
153 submit_bio(fio->rw, bio); f2fs_submit_page_bio()
173 if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 || f2fs_submit_page_mbio()
177 if (io->bio == NULL) { f2fs_submit_page_mbio()
180 io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read); f2fs_submit_page_mbio()
184 if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < f2fs_submit_page_mbio()
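The f2fs hits above (__bio_alloc(), f2fs_submit_page_bio(), f2fs_read_end_io()) illustrate the usual single-page read path: allocate a bio, point it at the block device and sector, attach an end_io callback, add the page, and submit. A minimal sketch under the same 4.1-era API; the example_* names are hypothetical.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

static void example_read_end_io(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (!err)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
        }
        bio_put(bio);
}

static int example_submit_page_read(struct super_block *sb, struct page *page,
                                    sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = sb->s_bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = example_read_end_io;

        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        submit_bio(READ, bio);
        return 0;
}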
H A Dsegment.c13 #include <linux/bio.h>
304 struct bio *bio = bio_alloc(GFP_NOIO, 0); issue_flush_thread() local
311 bio->bi_bdev = sbi->sb->s_bdev; issue_flush_thread()
312 ret = submit_bio_wait(WRITE_FLUSH, bio); issue_flush_thread()
319 bio_put(bio); issue_flush_thread()
1313 if (!io->bio) is_merged_page()
1316 bio_for_each_segment_all(bvec, io->bio, i) { is_merged_page()
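issue_flush_thread() above also shows the smallest possible bio: no pages at all, just bi_bdev and a WRITE_FLUSH submission that is waited on synchronously. A sketch of that idiom; example_issue_flush() is a hypothetical wrapper.

#include <linux/bio.h>
#include <linux/fs.h>

static int example_issue_flush(struct block_device *bdev)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 0);
        int ret;

        bio->bi_bdev = bdev;
        ret = submit_bio_wait(WRITE_FLUSH, bio);        /* blocks until completion */
        bio_put(bio);
        return ret;
}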
/linux-4.1.27/include/uapi/linux/
H A Dblktrace_api.h50 __BLK_TA_SPLIT, /* bio was split */
51 __BLK_TA_BOUNCE, /* bio was bounced */
52 __BLK_TA_REMAP, /* bio was remapped */
/linux-4.1.27/drivers/block/zram/
H A Dzram_drv.c24 #include <linux/bio.h>
505 /* Should NEVER happen. Return bio error if it does. */ zram_decompress_page()
547 /* Should NEVER happen. Return bio error if it does. */ zram_bvec_read()
741 int offset, struct bio *bio) zram_bio_discard()
743 size_t n = bio->bi_iter.bi_size; zram_bio_discard()
922 static void __zram_make_request(struct zram *zram, struct bio *bio) __zram_make_request() argument
929 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; __zram_make_request()
930 offset = (bio->bi_iter.bi_sector & __zram_make_request()
933 if (unlikely(bio->bi_rw & REQ_DISCARD)) { __zram_make_request()
934 zram_bio_discard(zram, index, offset, bio); __zram_make_request()
935 bio_endio(bio, 0); __zram_make_request()
939 rw = bio_data_dir(bio); bio_for_each_segment()
940 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
946 * zram page. Split the bio vector. bio_for_each_segment()
968 set_bit(BIO_UPTODATE, &bio->bi_flags);
969 bio_endio(bio, 0);
973 bio_io_error(bio);
979 static void zram_make_request(struct request_queue *queue, struct bio *bio) zram_make_request() argument
986 if (!valid_io_request(zram, bio->bi_iter.bi_sector, zram_make_request()
987 bio->bi_iter.bi_size)) { zram_make_request()
992 __zram_make_request(zram, bio); zram_make_request()
998 bio_io_error(bio); zram_make_request()
1048 * It causes resubmit the I/O with bio request by upper functions zram_rw_page()
1050 * bio->bi_end_io does things to handle the error zram_rw_page()
740 zram_bio_discard(struct zram *zram, u32 index, int offset, struct bio *bio) zram_bio_discard() argument
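The zram hits above show a classic make_request-style driver: handle REQ_DISCARD early, walk the remaining segments with bio_for_each_segment(), and finish the bio with bio_endio() or bio_io_error(). A condensed sketch of that control flow; example_rw_segment() is a hypothetical stand-in for zram's per-bvec work.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* hypothetical per-segment handler; a real driver would move data here */
static int example_rw_segment(struct bio_vec *bv, int rw, sector_t sector)
{
        return 0;
}

static void example_make_request(struct request_queue *q, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;
        int rw = bio_data_dir(bio);

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                /* nothing backs a discard in this sketch */
                bio_endio(bio, 0);
                return;
        }

        bio_for_each_segment(bvec, bio, iter) {
                if (example_rw_segment(&bvec, rw, sector) < 0) {
                        bio_io_error(bio);
                        return;
                }
                sector += bvec.bv_len >> 9;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
}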
/linux-4.1.27/drivers/block/xen-blkback/
H A Dblkback.c1040 * Completion callback on the bio's. Called as bh->b_end_io()
1063 * If all of the bio's have completed it is time to unmap __end_block_io_op()
1072 * bio callback.
1074 static void end_block_io_op(struct bio *bio, int error) end_block_io_op() argument
1076 __end_block_io_op(bio->bi_private, error); end_block_io_op()
1077 bio_put(bio); end_block_io_op()
1185 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
1195 struct bio *bio = NULL; dispatch_rw_block_io() local
1196 struct bio **biolist = pending_req->biolist; dispatch_rw_block_io()
1244 /* Haven't submitted any bio's yet. */ dispatch_rw_block_io()
1316 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. dispatch_rw_block_io()
1322 while ((bio == NULL) || dispatch_rw_block_io()
1323 (bio_add_page(bio, dispatch_rw_block_io()
1329 bio = bio_alloc(GFP_KERNEL, nr_iovecs); dispatch_rw_block_io()
1330 if (unlikely(bio == NULL)) dispatch_rw_block_io()
1333 biolist[nbio++] = bio; dispatch_rw_block_io()
1334 bio->bi_bdev = preq.bdev; dispatch_rw_block_io()
1335 bio->bi_private = pending_req; dispatch_rw_block_io()
1336 bio->bi_end_io = end_block_io_op; dispatch_rw_block_io()
1337 bio->bi_iter.bi_sector = preq.sector_number; dispatch_rw_block_io()
1344 if (!bio) { dispatch_rw_block_io()
1347 bio = bio_alloc(GFP_KERNEL, 0); dispatch_rw_block_io()
1348 if (unlikely(bio == NULL)) dispatch_rw_block_io()
1351 biolist[nbio++] = bio; dispatch_rw_block_io()
1352 bio->bi_bdev = preq.bdev; dispatch_rw_block_io()
1353 bio->bi_private = pending_req; dispatch_rw_block_io()
1354 bio->bi_end_io = end_block_io_op; dispatch_rw_block_io()
1377 /* Haven't submitted any bio's yet. */ dispatch_rw_block_io()
/linux-4.1.27/fs/xfs/
H A Dxfs_aops.c74 * release holds on the inode and bio, and finally free
354 struct bio *bio, xfs_end_bio()
357 xfs_ioend_t *ioend = bio->bi_private; xfs_end_bio()
359 ASSERT(atomic_read(&bio->bi_cnt) >= 1); xfs_end_bio()
360 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; xfs_end_bio()
362 /* Toss bio and pass work off to an xfsdatad thread */ xfs_end_bio()
363 bio->bi_private = NULL; xfs_end_bio()
364 bio->bi_end_io = NULL; xfs_end_bio()
365 bio_put(bio); xfs_end_bio()
374 struct bio *bio) xfs_submit_ioend_bio()
377 bio->bi_private = ioend; xfs_submit_ioend_bio()
378 bio->bi_end_io = xfs_end_bio; xfs_submit_ioend_bio()
379 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio); xfs_submit_ioend_bio()
382 STATIC struct bio * xfs_alloc_ioend_bio()
387 struct bio *bio = bio_alloc(GFP_NOIO, nvecs); xfs_alloc_ioend_bio() local
389 ASSERT(bio->bi_private == NULL); xfs_alloc_ioend_bio()
390 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); xfs_alloc_ioend_bio()
391 bio->bi_bdev = bh->b_bdev; xfs_alloc_ioend_bio()
392 return bio; xfs_alloc_ioend_bio()
438 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh) xfs_bio_add_buffer() argument
440 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); xfs_bio_add_buffer()
474 struct bio *bio; xfs_submit_ioend() local
488 bio = NULL; xfs_submit_ioend()
504 if (!bio) { xfs_submit_ioend()
506 bio = xfs_alloc_ioend_bio(bh); xfs_submit_ioend()
508 xfs_submit_ioend_bio(wbc, ioend, bio); xfs_submit_ioend()
512 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) { xfs_submit_ioend()
513 xfs_submit_ioend_bio(wbc, ioend, bio); xfs_submit_ioend()
519 if (bio) xfs_submit_ioend()
520 xfs_submit_ioend_bio(wbc, ioend, bio); xfs_submit_ioend()
353 xfs_end_bio( struct bio *bio, int error) xfs_end_bio() argument
371 xfs_submit_ioend_bio( struct writeback_control *wbc, xfs_ioend_t *ioend, struct bio *bio) xfs_submit_ioend_bio() argument
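xfs_end_bio() and xfs_submit_ioend_bio() above demonstrate the 4.1 completion idiom: stash a context in bi_private, derive the error from BIO_UPTODATE in the end_io callback, then drop the bio with bio_put(). A sketch with a hypothetical struct example_ioend standing in for the xfs ioend.

#include <linux/bio.h>
#include <linux/fs.h>

struct example_ioend {
        int io_error;
};

/* hypothetical: a real implementation would queue or complete the ioend */
static void example_finish_ioend(struct example_ioend *ioend)
{
}

static void example_end_bio(struct bio *bio, int error)
{
        struct example_ioend *ioend = bio->bi_private;

        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        example_finish_ioend(ioend);
}

static void example_submit(struct example_ioend *ioend, struct bio *bio, int sync)
{
        bio->bi_private = ioend;
        bio->bi_end_io = example_end_bio;
        submit_bio(sync ? WRITE_SYNC : WRITE, bio);
}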
H A Dxfs_aops.h39 * It can manage several multi-page bio's at once.
H A Dxfs_buf.c25 #include <linux/bio.h>
1106 struct bio *bio, xfs_buf_bio_end_io()
1109 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; xfs_buf_bio_end_io()
1127 bio_put(bio); xfs_buf_bio_end_io()
1141 struct bio *bio; xfs_buf_ioapply_map() local
1170 bio = bio_alloc(GFP_NOIO, nr_pages); xfs_buf_ioapply_map()
1171 bio->bi_bdev = bp->b_target->bt_bdev; xfs_buf_ioapply_map()
1172 bio->bi_iter.bi_sector = sector; xfs_buf_ioapply_map()
1173 bio->bi_end_io = xfs_buf_bio_end_io; xfs_buf_ioapply_map()
1174 bio->bi_private = bp; xfs_buf_ioapply_map()
1183 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, xfs_buf_ioapply_map()
1194 if (likely(bio->bi_iter.bi_size)) { xfs_buf_ioapply_map()
1199 submit_bio(rw, bio); xfs_buf_ioapply_map()
1209 bio_put(bio); xfs_buf_ioapply_map()
1105 xfs_buf_bio_end_io( struct bio *bio, int error) xfs_buf_bio_end_io() argument
/linux-4.1.27/fs/ocfs2/cluster/
H A Dheartbeat.c27 #include <linux/bio.h>
375 static void o2hb_bio_end_io(struct bio *bio, o2hb_bio_end_io() argument
378 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; o2hb_bio_end_io()
386 bio_put(bio); o2hb_bio_end_io()
391 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, o2hb_setup_one_bio()
401 struct bio *bio; o2hb_setup_one_bio() local
408 bio = bio_alloc(GFP_ATOMIC, 16); o2hb_setup_one_bio()
409 if (!bio) { o2hb_setup_one_bio()
411 bio = ERR_PTR(-ENOMEM); o2hb_setup_one_bio()
415 /* Must put everything in 512 byte sectors for the bio... */ o2hb_setup_one_bio()
416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); o2hb_setup_one_bio()
417 bio->bi_bdev = reg->hr_bdev; o2hb_setup_one_bio()
418 bio->bi_private = wc; o2hb_setup_one_bio()
419 bio->bi_end_io = o2hb_bio_end_io; o2hb_setup_one_bio()
432 len = bio_add_page(bio, page, vec_len, vec_start); o2hb_setup_one_bio()
441 return bio; o2hb_setup_one_bio()
450 struct bio *bio; o2hb_read_slots() local
455 bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots); o2hb_read_slots()
456 if (IS_ERR(bio)) { o2hb_read_slots()
457 status = PTR_ERR(bio); o2hb_read_slots()
463 submit_bio(READ, bio); o2hb_read_slots()
481 struct bio *bio; o2hb_issue_node_write() local
487 bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1); o2hb_issue_node_write()
488 if (IS_ERR(bio)) { o2hb_issue_node_write()
489 status = PTR_ERR(bio); o2hb_issue_node_write()
495 submit_bio(WRITE_SYNC, bio); o2hb_issue_node_write()
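o2hb_setup_one_bio() above is a reminder that bi_sector is always in 512-byte units, so a filesystem block number must be shifted by (blocksize_bits - 9) before it is stored. A small sketch of that conversion; example_bio_for_block() is hypothetical.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *example_bio_for_block(struct block_device *bdev,
                                         sector_t block, unsigned blkbits)
{
        struct bio *bio = bio_alloc(GFP_ATOMIC, 16);

        if (!bio)
                return ERR_PTR(-ENOMEM);

        /* must put everything in 512 byte sectors for the bio */
        bio->bi_iter.bi_sector = block << (blkbits - 9);
        bio->bi_bdev = bdev;
        return bio;
}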
/linux-4.1.27/net/ceph/
H A Dmessenger.c13 #include <linux/bio.h>
824 * For a bio data item, a piece is whatever remains of the next
825 * entry in the current bio iovec, or the first entry in the next
826 * bio in the list.
832 struct bio *bio; ceph_msg_data_bio_cursor_init() local
836 bio = data->bio; ceph_msg_data_bio_cursor_init()
837 BUG_ON(!bio); ceph_msg_data_bio_cursor_init()
840 cursor->bio = bio; ceph_msg_data_bio_cursor_init()
841 cursor->bvec_iter = bio->bi_iter; ceph_msg_data_bio_cursor_init()
843 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter); ceph_msg_data_bio_cursor_init()
851 struct bio *bio; ceph_msg_data_bio_next() local
856 bio = cursor->bio; ceph_msg_data_bio_next()
857 BUG_ON(!bio); ceph_msg_data_bio_next()
859 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); ceph_msg_data_bio_next()
876 struct bio *bio; ceph_msg_data_bio_advance() local
881 bio = cursor->bio; ceph_msg_data_bio_advance()
882 BUG_ON(!bio); ceph_msg_data_bio_advance()
884 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); ceph_msg_data_bio_advance()
891 bio_advance_iter(bio, &cursor->bvec_iter, bytes); ceph_msg_data_bio_advance()
896 /* Move on to the next segment, and possibly the next bio */ ceph_msg_data_bio_advance()
899 bio = bio->bi_next; ceph_msg_data_bio_advance()
900 cursor->bio = bio; ceph_msg_data_bio_advance()
901 if (bio) ceph_msg_data_bio_advance()
902 cursor->bvec_iter = bio->bi_iter; ceph_msg_data_bio_advance()
910 BUG_ON(!bio); ceph_msg_data_bio_advance()
912 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter)) ceph_msg_data_bio_advance()
3207 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, ceph_msg_data_add_bio() argument
3212 BUG_ON(!bio); ceph_msg_data_add_bio()
3216 data->bio = bio; ceph_msg_data_add_bio()
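The ceph messenger hits above walk a chain of bios one bio_vec at a time, keeping a private bvec_iter and moving to bio->bi_next once the current bio is exhausted. A simplified sketch of that cursor logic using the same 4.1 iterator helpers; example_handle_bvec() is a hypothetical consumer.

#include <linux/bio.h>

/* hypothetical: a real consumer would copy or send the segment's data */
static void example_handle_bvec(struct bio_vec bv)
{
}

static void example_walk_bio_chain(struct bio *bio)
{
        struct bvec_iter iter;

        while (bio) {
                iter = bio->bi_iter;
                while (iter.bi_size) {
                        struct bio_vec bv = bio_iter_iovec(bio, iter);

                        example_handle_bvec(bv);
                        bio_advance_iter(bio, &iter, bv.bv_len);
                }
                bio = bio->bi_next;     /* possibly the next bio in the chain */
        }
}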
/linux-4.1.27/drivers/scsi/mpt2sas/
H A Dmpt2sas_transport.c1944 if (bio_multiple_segments(req->bio)) { _transport_smp_handler()
1957 bio_for_each_segment(bvec, req->bio, iter) { _transport_smp_handler()
1964 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), _transport_smp_handler()
1976 if (bio_multiple_segments(rsp->bio)) { _transport_smp_handler()
1986 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), _transport_smp_handler()
2043 if (bio_multiple_segments(req->bio)) { _transport_smp_handler()
2059 if (bio_multiple_segments(rsp->bio)) { _transport_smp_handler()
2104 if (bio_multiple_segments(rsp->bio)) { _transport_smp_handler()
2108 bio_for_each_segment(bvec, rsp->bio, iter) { _transport_smp_handler()
/linux-4.1.27/drivers/scsi/mpt3sas/
H A Dmpt3sas_transport.c1927 if (bio_multiple_segments(req->bio)) { _transport_smp_handler()
1940 bio_for_each_segment(bvec, req->bio, iter) { _transport_smp_handler()
1947 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), _transport_smp_handler()
1959 if (bio_multiple_segments(rsp->bio)) { _transport_smp_handler()
1969 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), _transport_smp_handler()
2020 if (bio_multiple_segments(req->bio)) _transport_smp_handler()
2065 if (bio_multiple_segments(rsp->bio)) { _transport_smp_handler()
2069 bio_for_each_segment(bvec, rsp->bio, iter) { _transport_smp_handler()
/linux-4.1.27/drivers/mtd/maps/
H A Dscb2_flash.c32 * updates for this board include 10 related (*.bio - *.bi9) binary files and
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dlustre_compat25.h151 #define bio_hw_segments(q, bio) 0

Completed in 6734 milliseconds

12