
Searched refs:bv_len (Results 1 – 74 of 74) sorted by relevance

/linux-4.1.27/lib/
iov_iter.c 62 __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
63 if (likely(__v.bv_len)) { \
67 skip += __v.bv_len; \
68 n -= __v.bv_len; \
72 __v.bv_len = min_t(size_t, n, __p->bv_len); \
73 if (unlikely(!__v.bv_len)) \
78 skip = __v.bv_len; \
79 n -= __v.bv_len; \
107 if (skip == bvec->bv_len) { \
399 (from += v.bv_len) - v.bv_len, v.bv_len), in copy_to_iter()
[all …]
/linux-4.1.27/include/linux/
bio.h 71 __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
79 .bv_len = bvec_iter_len((bvec), (iter)), \
98 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
137 return bio_iovec(bio).bv_len; in bio_cur_bytes()
174 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
187 …__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((…
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1)); in bvec_gap_to_prev()
220 if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { in bvec_iter_advance()
231 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
249 bio_advance_iter((bio), &(iter), (bvl).bv_len))
[all …]
blk_types.h 25 unsigned int bv_len; member
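
For orientation: bv_len is the byte length of a single bio_vec segment, the struct bio_vec member defined in blk_types.h above (alongside bv_page and bv_offset). Below is a minimal sketch of the iteration pattern most of the hits in this listing follow, assuming the 4.x bio_for_each_segment()/bvec_iter API; the helper name is hypothetical and not taken from any of the results.

    #include <linux/bio.h>

    /* Hypothetical helper: walk a bio one segment at a time and sum
     * bv_len, the byte count of each segment. */
    static unsigned int sum_bvec_lengths(struct bio *bio)
    {
            struct bio_vec bvec;
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;

            /* For an unprocessed bio this should equal bio->bi_iter.bi_size. */
            return bytes;
    }
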
/linux-4.1.27/arch/powerpc/sysdev/
axonram.c 121 if (unlikely(phys_mem + vec.bv_len > phys_end)) { in axon_ram_make_request()
128 memcpy(user_mem, (void *) phys_mem, vec.bv_len); in axon_ram_make_request()
130 memcpy((void *) phys_mem, user_mem, vec.bv_len); in axon_ram_make_request()
132 phys_mem += vec.bv_len; in axon_ram_make_request()
133 transfered += vec.bv_len; in axon_ram_make_request()
/linux-4.1.27/block/
blk-integrity.c 61 if (seg_size + iv.bv_len > queue_max_segment_size(q)) in blk_rq_count_integrity_sg()
64 seg_size += iv.bv_len; in blk_rq_count_integrity_sg()
68 seg_size = iv.bv_len; in blk_rq_count_integrity_sg()
107 if (sg->length + iv.bv_len > queue_max_segment_size(q)) in blk_rq_map_integrity_sg()
110 sg->length += iv.bv_len; in blk_rq_map_integrity_sg()
120 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); in blk_rq_map_integrity_sg()
bio.c 510 memset(data, 0, bv.bv_len); in zero_fill_bio()
727 offset == prev->bv_offset + prev->bv_len) { in __bio_add_page()
728 unsigned int prev_bv_len = prev->bv_len; in __bio_add_page()
729 prev->bv_len += len; in __bio_add_page()
744 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { in __bio_add_page()
745 prev->bv_len -= len; in __bio_add_page()
772 bvec->bv_len = len; in __bio_add_page()
809 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) in __bio_add_page()
822 bvec->bv_len = 0; in __bio_add_page()
1005 bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data()
[all …]
blk-merge.c 56 if (seg_size + bv.bv_len in __blk_recalc_rq_segments()
64 seg_size += bv.bv_len; in __blk_recalc_rq_segments()
75 seg_size = bv.bv_len; in __blk_recalc_rq_segments()
140 if (end_bv.bv_len == iter.bi_size) in blk_phys_contig_segment()
164 int nbytes = bvec->bv_len; in __blk_segment_map_sg()
231 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in __blk_bios_map_sg()
bounce.c 59 memcpy(vto + to->bv_offset, vfrom, to->bv_len); in bounce_copy_vec()
67 memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
234 memcpy(vto, vfrom, to->bv_len); in __blk_queue_bounce()
bio-integrity.c 144 iv->bv_len = len; in bio_integrity_add_page()
235 iter.data_size = bv.bv_len; in bio_integrity_process()
blk-lib.c 192 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); in blkdev_issue_write_same()
blk-core.c 1451 bio->bi_io_vec->bv_len = len; in blk_add_request_payload()
/linux-4.1.27/drivers/s390/block/
dasd_fba.c 286 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp()
289 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp()
290 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp()
291 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp()
332 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp()
336 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_build_cp()
401 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_free_cp()
412 memcpy(dst, cda, bv.bv_len); in dasd_fba_free_cp()
dasd_diag.c 528 if (bv.bv_len & (blksize - 1)) in dasd_diag_build_cp()
531 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_diag_build_cp()
549 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_diag_build_cp()
dcssblk.c 862 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) in dcssblk_make_request()
867 bvec.bv_len); in dcssblk_make_request()
870 bvec.bv_len); in dcssblk_make_request()
872 bytes_done += bvec.bv_len; in dcssblk_make_request()
xpram.c 206 bytes = bvec.bv_len; in xpram_make_request()
dasd_eckd.c 2611 if (bv.bv_len & (blksize - 1)) in dasd_eckd_build_cp_cmd_single()
2614 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_eckd_build_cp_cmd_single()
2615 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_eckd_build_cp_cmd_single()
2616 cidaw += bv.bv_len >> (block->s2b_shift + 9); in dasd_eckd_build_cp_cmd_single()
2691 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_eckd_build_cp_cmd_single()
2695 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_eckd_build_cp_cmd_single()
2850 seg_len = bv.bv_len; in dasd_eckd_build_cp_cmd_track()
3162 seg_len = bv.bv_len; in dasd_eckd_build_cp_tpm_track()
3196 dst, bv.bv_len); in dasd_eckd_build_cp_tpm_track()
3415 seg_len = bv.bv_len; in dasd_raw_build_cp()
[all …]
scm_blk.c 205 msb->blk_count += bv.bv_len >> 12; in scm_request_prepare()
/linux-4.1.27/drivers/md/
dm-log-writes.c 281 block->vecs[i].bv_len, 0); in log_one_block()
282 if (ret != block->vecs[i].bv_len) { in log_one_block()
298 block->vecs[i].bv_len, 0); in log_one_block()
299 if (ret != block->vecs[i].bv_len) { in log_one_block()
305 sector += block->vecs[i].bv_len >> SECTOR_SHIFT; in log_one_block()
646 memcpy(dst, src + bv.bv_offset, bv.bv_len); in log_writes_map()
650 block->vecs[i].bv_len = bv.bv_len; in log_writes_map()
raid0.c 370 if (max <= biovec->bv_len && bio_sectors == 0) in raid0_mergeable_bvec()
371 return biovec->bv_len; in raid0_mergeable_bvec()
372 if (max < biovec->bv_len) in raid0_mergeable_bvec()
linear.c 70 int maxbytes = biovec->bv_len; in linear_mergeable_bvec()
dm-io.c 208 *len = bvec->bv_len - dp->context_u; in bio_get_page()
raid1.c 717 int max = biovec->bv_len; in raid1_mergeable_bvec()
1017 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); in alloc_behind_pages()
1993 bi->bv_len = PAGE_SIZE; in process_checks()
1995 bi->bv_len = size; in process_checks()
2025 sbio->bi_io_vec[j].bv_len)) in process_checks()
raid10.c 708 if (max <= biovec->bv_len && bio_sectors == 0) in raid10_mergeable_bvec()
709 return biovec->bv_len; in raid10_mergeable_bvec()
711 max = biovec->bv_len; in raid10_mergeable_bvec()
722 if (max <= biovec->bv_len && bio_sectors == 0) in raid10_mergeable_bvec()
723 return biovec->bv_len; in raid10_mergeable_bvec()
2105 tbio->bi_io_vec[j].bv_len = PAGE_SIZE; in sync_request_write()
dm-verity.c 412 len = bv.bv_len; in verity_verify_io()
dm.c 1764 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) in dm_merge_bvec()
1765 max_size = biovec->bv_len; in dm_merge_bvec()
raid5.c 1021 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; in ops_run_io()
1070 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; in ops_run_io()
1125 int len = bvl.bv_len; in async_copy_data()
4690 return biovec->bv_len; in raid5_mergeable_bvec()
4696 if (max <= biovec->bv_len && bio_sectors == 0) in raid5_mergeable_bvec()
4697 return biovec->bv_len; in raid5_mergeable_bvec()
dm-crypt.c 1008 bvec->bv_len = len; in crypt_alloc_buffer()
md.c 365 ret = biovec->bv_len; in md_mergeable_bvec()
373 ret = biovec->bv_len; in md_mergeable_bvec()
/linux-4.1.27/drivers/block/zram/
zram_drv.c 319 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
411 if (*offset + bvec->bv_len >= PAGE_SIZE) in update_position()
413 *offset = (*offset + bvec->bv_len) % PAGE_SIZE; in update_position()
438 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); in handle_zero_page()
553 bvec->bv_len); in zram_bvec_read()
614 bvec->bv_len); in zram_bvec_write()
712 generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT, in zram_bvec_rw()
943 if (bvec.bv_len > max_transfer_size) { in __zram_make_request()
951 bv.bv_len = max_transfer_size; in __zram_make_request()
957 bv.bv_len = bvec.bv_len - max_transfer_size; in __zram_make_request()
[all …]
/linux-4.1.27/drivers/md/bcache/
util.c 238 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
245 base += bv->bv_len; in bch_bio_map()
248 size -= bv->bv_len; in bch_bio_map()
io.c 37 q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) in bch_bio_max_sectors()
41 ret += bv.bv_len >> 9; in bch_bio_max_sectors()
47 ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9); in bch_bio_max_sectors()
debug.c 128 bv.bv_len), in bch_data_verify()
request.c 46 csum = bch_crc64_update(csum, d, bv.bv_len); in bio_csum()
/linux-4.1.27/drivers/scsi/
sd_dif.c 139 for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) { in sd_dif_prepare()
186 for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) { in sd_dif_complete()
sd.c 829 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); in sd_setup_write_same_cmnd()
/linux-4.1.27/drivers/block/rsxx/
dma.c 691 unsigned int bv_len; in rsxx_dma_queue_bio() local
709 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
711 while (bv_len > 0) { in rsxx_dma_queue_bio()
723 bv_len -= RSXX_HW_BLK_SIZE; in rsxx_dma_queue_bio()
727 bv_len = bvec.bv_len; in rsxx_dma_queue_bio()
730 while (bv_len > 0) { in rsxx_dma_queue_bio()
734 dma_len = min(bv_len, in rsxx_dma_queue_bio()
749 bv_len -= dma_len; in rsxx_dma_queue_bio()
/linux-4.1.27/fs/logfs/
dev_bdev.c 26 bio_vec.bv_len = PAGE_SIZE; in sync_request()
114 bio->bi_io_vec[i].bv_len = PAGE_SIZE; in __bdev_writeseg()
204 bio->bi_io_vec[i].bv_len = PAGE_SIZE; in do_erase()
/linux-4.1.27/drivers/block/
loop.c 210 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); in lo_write_bvec()
216 if (likely(bw == bvec->bv_len)) in lo_write_bvec()
221 (unsigned long long)*ppos, bvec->bv_len); in lo_write_bvec()
263 bvec.bv_offset, bvec.bv_len, pos >> 9); in lo_write_transfer()
269 b.bv_len = bvec.bv_len; in lo_write_transfer()
288 iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); in lo_read_simple()
295 if (len != bvec.bv_len) { in lo_read_simple()
327 b.bv_len = bvec.bv_len; in lo_read_transfer()
329 iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); in lo_read_transfer()
343 if (len != bvec.bv_len) { in lo_read_transfer()
pmem.c 80 pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset, in pmem_make_request()
82 sector += bvec.bv_len >> 9; in pmem_make_request()
umem.c 371 vec.bv_len, in add_bio()
390 desc->transfer_size = cpu_to_le32(vec.bv_len); in add_bio()
406 bio_advance_iter(bio, &card->current_iter, vec.bv_len); in add_bio()
445 bio_advance_iter(bio, &page->iter, vec.bv_len); in process_page()
454 vec.bv_len, in process_page()
nbd.c 222 bvec->bv_len, flags); in sock_send_bvec()
267 req, bvec.bv_len); in nbd_send_req()
307 result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len, in sock_recv_bvec()
368 req, bvec.bv_len); in nbd_read_stat()
brd.c 351 unsigned int len = bvec.bv_len; in brd_make_request()
ps3disk.c 108 size = bvec.bv_len; in ps3disk_scatter_gather()
rbd.c 1258 if (pos + bv.bv_len > start_ofs) { in zero_bio_chain()
1262 bv.bv_len - remainder); in zero_bio_chain()
1266 pos += bv.bv_len; in zero_bio_chain()
3497 rbd_assert(bvec->bv_len <= PAGE_SIZE); in rbd_merge_bvec()
3498 if (ret > (int) bvec->bv_len || !bmd->bi_size) in rbd_merge_bvec()
3499 ret = (int) bvec->bv_len; in rbd_merge_bvec()
ps3vram.c 565 size_t len = bvec.bv_len, retlen; in ps3vram_do_bio()
floppy.c 2380 size += bv.bv_len; in buffer_chain_size()
2444 size = bv.bv_len; in copy_buffer()
3809 bio_vec.bv_len = size; in __floppy_read_block_0()
/linux-4.1.27/fs/btrfs/
file-item.c 246 offset + bvec->bv_len - 1, in __btrfs_lookup_bio_sums()
286 disk_bytenr += bvec->bv_len; in __btrfs_lookup_bio_sums()
287 offset += bvec->bv_len; in __btrfs_lookup_bio_sums()
487 bvec->bv_len); in btrfs_csum_one_bio()
494 total_bytes += bvec->bv_len; in btrfs_csum_one_bio()
495 this_sum_bytes += bvec->bv_len; in btrfs_csum_one_bio()
496 offset += bvec->bv_len; in btrfs_csum_one_bio()
extent_io.c 2499 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { in end_bio_extent_writepage()
2500 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) in end_bio_extent_writepage()
2503 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
2508 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
2512 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2579 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { in end_bio_extent_readpage()
2580 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) in end_bio_extent_readpage()
2583 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
2588 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
2592 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
[all …]
inode.c 7775 start + bvec->bv_len - 1, in __btrfs_correct_data_nocsum()
7788 start += bvec->bv_len; in __btrfs_correct_data_nocsum()
7810 done->start, bvec->bv_len); in btrfs_retry_endio()
7840 0, start, bvec->bv_len); in __btrfs_subio_endio_read()
7849 start + bvec->bv_len - 1, in __btrfs_subio_endio_read()
7864 offset += bvec->bv_len; in __btrfs_subio_endio_read()
7865 start += bvec->bv_len; in __btrfs_subio_endio_read()
8143 if (map_length < submit_len + bvec->bv_len || in btrfs_submit_direct_hook()
8144 bio_add_page(bio, bvec->bv_page, bvec->bv_len, in btrfs_submit_direct_hook()
8145 bvec->bv_offset) < bvec->bv_len) { in btrfs_submit_direct_hook()
[all …]
compression.c 1076 unsigned long len = bvec[pg_index].bv_len; in btrfs_clear_biovec_end()
check-integrity.c 2999 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); in __btrfsic_submit_bio()
3013 i, cur_bytenr, bio->bi_io_vec[i].bv_len, in __btrfsic_submit_bio()
3015 cur_bytenr += bio->bi_io_vec[i].bv_len; in __btrfsic_submit_bio()
volumes.c 5742 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; in bio_size_ok()
5743 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) in bio_size_ok()
5796 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, in breakup_stripe_bio()
5797 bvec->bv_offset) < bvec->bv_len) { in breakup_stripe_bio()
/linux-4.1.27/fs/9p/
vfs_addr.c 55 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
176 bvec.bv_len = len; in v9fs_vfs_writepage_locked()
/linux-4.1.27/mm/
page_io.c 37 bio->bi_io_vec[0].bv_len = PAGE_SIZE; in get_swap_bio()
269 .bv_len = PAGE_SIZE, in __swap_writepage()
/linux-4.1.27/arch/m68k/emu/
nfblock.c 73 len = bvec.bv_len; in nfhd_make_request()
/linux-4.1.27/Documentation/block/
biovecs.txt 14 bv_len by the number of bytes completed in that biovec.
19 and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
biodoc.txt 424 unsigned short bv_len;
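
The biovecs.txt excerpt above describes how iteration state is advanced: bi_bvec_done tracks the bytes completed in the current biovec, and the iterator moves on once bi_bvec_done reaches that biovec's bv_len (the check visible in the bio.h hit at line 220). Below is a standalone, illustrative C model of that advance logic; the struct declarations are simplified assumptions, not the kernel's actual definitions.

    struct bio_vec   { unsigned int bv_len; };
    struct bvec_iter { unsigned int bi_size, bi_idx, bi_bvec_done; };

    /* Illustrative model of bvec_iter advancement: consume bytes from the
     * current bvec; once it is fully consumed, step bi_idx to the next one. */
    static void bvec_iter_advance_model(const struct bio_vec *bv,
                                        struct bvec_iter *iter,
                                        unsigned int bytes)
    {
            while (bytes) {
                    unsigned int len = bv[iter->bi_idx].bv_len - iter->bi_bvec_done;

                    if (len > bytes)
                            len = bytes;

                    bytes -= len;
                    iter->bi_size -= len;
                    iter->bi_bvec_done += len;

                    if (iter->bi_bvec_done == bv[iter->bi_idx].bv_len) {
                            iter->bi_bvec_done = 0;
                            iter->bi_idx++;
                    }
            }
    }
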
/linux-4.1.27/drivers/scsi/mpt3sas/
mpt3sas_transport.c 1943 bvec.bv_len); in _transport_smp_handler()
1944 offset += bvec.bv_len; in _transport_smp_handler()
2070 if (bytes_to_copy <= bvec.bv_len) { in _transport_smp_handler()
2078 offset, bvec.bv_len); in _transport_smp_handler()
2079 bytes_to_copy -= bvec.bv_len; in _transport_smp_handler()
2081 offset += bvec.bv_len; in _transport_smp_handler()
/linux-4.1.27/drivers/scsi/mpt2sas/
mpt2sas_transport.c 1960 bvec.bv_len); in _transport_smp_handler()
1961 offset += bvec.bv_len; in _transport_smp_handler()
2109 if (bytes_to_copy <= bvec.bv_len) { in _transport_smp_handler()
2117 offset, bvec.bv_len); in _transport_smp_handler()
2118 bytes_to_copy -= bvec.bv_len; in _transport_smp_handler()
2120 offset += bvec.bv_len; in _transport_smp_handler()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
lloop.c 222 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); in do_bio_lustrebacked()
227 offset += bvec.bv_len; in do_bio_lustrebacked()
/linux-4.1.27/arch/xtensa/platforms/iss/
simdisk.c 112 unsigned len = bvec.bv_len >> SECTOR_SHIFT; in simdisk_xfer_bio()
/linux-4.1.27/drivers/target/
target_core_file.c 340 bvec[i].bv_len = sg->length; in fd_do_rw()
466 bvec[i].bv_len = cmd->t_data_sg[0].length; in fd_execute_write_same()
/linux-4.1.27/arch/arm/include/asm/
io.h 382 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
/linux-4.1.27/fs/ext4/
page-io.c 75 unsigned bio_end = bio_start + bvec->bv_len; in ext4_finish_bio()
/linux-4.1.27/drivers/block/aoe/
aoecmd.c 304 bv.bv_offset, bv.bv_len); in skb_fillup()
1096 skb_copy_bits(skb, soff, p, bv.bv_len); in bvcpy()
1097 soff += bv.bv_len; in bvcpy()
/linux-4.1.27/fs/
buffer.c 2991 bvec->bv_len -= truncated_bytes; in guard_bio_eod()
2995 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, in guard_bio_eod()
3026 bio->bi_io_vec[0].bv_len = bh->b_size; in _submit_bh()
splice.c 1007 array[n].bv_len = this_len; in iter_file_splice_write()
/linux-4.1.27/fs/gfs2/
lops.c 180 size = bvec->bv_len; in gfs2_end_log_write_bh()
/linux-4.1.27/fs/jfs/
jfs_logmgr.c 2003 bio->bi_io_vec[0].bv_len = LOGPSIZE; in lbmRead()
2149 bio->bi_io_vec[0].bv_len = LOGPSIZE; in lbmStartIO()
/linux-4.1.27/fs/exofs/
ore.c 411 unsigned this_count = bv->bv_len; in _clear_bio()
/linux-4.1.27/net/ceph/
messenger.c 866 *length = (size_t) bio_vec.bv_len; in ceph_msg_data_bio_next()
893 if (bytes < bio_vec.bv_len) in ceph_msg_data_bio_advance()
/linux-4.1.27/drivers/block/drbd/
drbd_main.c 1558 bvec.bv_offset, bvec.bv_len, in _drbd_send_bio()
1577 bvec.bv_offset, bvec.bv_len, in _drbd_send_zc_bio()
drbd_worker.c 335 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in drbd_csum_bio()
drbd_receiver.c 1733 expect = min_t(int, data_size, bvec.bv_len); in recv_dless_read()