
Searched refs: iov_len (Results 1 – 155 of 155) sorted by relevance

/linux-4.1.27/lib/
Diov_iter.c12 __v.iov_len = min(n, __p->iov_len - skip); \
13 if (likely(__v.iov_len)) { \
16 __v.iov_len -= left; \
17 skip += __v.iov_len; \
18 n -= __v.iov_len; \
24 __v.iov_len = min(n, __p->iov_len); \
25 if (unlikely(!__v.iov_len)) \
29 __v.iov_len -= left; \
30 skip = __v.iov_len; \
31 n -= __v.iov_len; \
[all …]
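The lib/iov_iter.c hits above come from the segment-walking macros that copy data across an iovec array, clamping each step to the current segment's remaining iov_len before advancing. A minimal userspace sketch of that pattern (names are illustrative; the kernel macro's partial-copy handling via "left" is omitted):

    #include <stddef.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Copy up to n bytes out of an iovec array, one segment at a time. */
    static size_t copy_from_iovec(void *dst, size_t n,
                                  const struct iovec *iov, int iovcnt)
    {
        size_t copied = 0;
        int i;

        for (i = 0; i < iovcnt && n > 0; i++) {
            /* clamp to what this segment actually holds */
            size_t len = iov[i].iov_len < n ? iov[i].iov_len : n;

            memcpy((char *)dst + copied, iov[i].iov_base, len);
            copied += len;
            n -= len;
        }
        return copied;
    }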
/linux-4.1.27/net/sunrpc/
Dxdr.c138 unsigned int buflen = head->iov_len; in xdr_inline_pages()
140 head->iov_len = offset; in xdr_inline_pages()
147 tail->iov_len = buflen - offset; in xdr_inline_pages()
325 WARN_ON_ONCE(len > head->iov_len); in xdr_shrink_bufhead()
326 if (len > head->iov_len) in xdr_shrink_bufhead()
327 len = head->iov_len; in xdr_shrink_bufhead()
330 if (tail->iov_len != 0) { in xdr_shrink_bufhead()
331 if (tail->iov_len > len) { in xdr_shrink_bufhead()
332 copy = tail->iov_len - len; in xdr_shrink_bufhead()
341 if (offs >= tail->iov_len) in xdr_shrink_bufhead()
[all …]
Dsvcsock.c191 if (slen == xdr->head[0].iov_len) in svc_send_common()
194 xdr->head[0].iov_len, flags); in svc_send_common()
195 if (len != xdr->head[0].iov_len) in svc_send_common()
197 slen -= xdr->head[0].iov_len; in svc_send_common()
219 if (xdr->tail[0].iov_len) { in svc_send_common()
221 xdr->tail[0].iov_len, 0); in svc_send_common()
271 svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, in svc_sendto()
339 svsk, iov[0].iov_base, iov[0].iov_len, len); in svc_recvfrom()
356 if (iov[i].iov_len > base) in svc_partial_recvfrom()
358 base -= iov[i].iov_len; in svc_partial_recvfrom()
[all …]
Dbackchannel_rqst.c131 xbufp->head[0].iov_len = PAGE_SIZE; in xprt_setup_backchannel()
133 xbufp->tail[0].iov_len = 0; in xprt_setup_backchannel()
147 xbufp->head[0].iov_len = 0; in xprt_setup_backchannel()
149 xbufp->tail[0].iov_len = 0; in xprt_setup_backchannel()
Dsvc.c1087 if (argv->iov_len < 6*4) in svc_process_common()
1110 reply_statp = resv->iov_base + resv->iov_len; in svc_process_common()
1168 statp = resv->iov_base +resv->iov_len; in svc_process_common()
1201 !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { in svc_process_common()
1218 resv->iov_len = ((void*)statp) - resv->iov_base + 4; in svc_process_common()
1239 argv->iov_len); in svc_process_common()
1312 resv->iov_len = 0; in svc_process()
1319 rqstp->rq_res.tail[0].iov_len = 0; in svc_process()
1366 resv->iov_len = 0; in bc_svc_process()
Dsocklib.c79 len = xdr->head[0].iov_len; in xdr_partial_copy_from_skb()
137 len = xdr->tail[0].iov_len; in xdr_partial_copy_from_skb()
Dsvc_xprt.c457 space += rqstp->rq_res.head[0].iov_len; in svc_reserve()
494 rqstp->rq_res.head[0].iov_len = 0; in svc_xprt_release()
632 arg->head[0].iov_len = PAGE_SIZE; in svc_alloc_arg()
638 arg->tail[0].iov_len = 0; in svc_alloc_arg()
878 xb->len = xb->head[0].iov_len + in svc_send()
880 xb->tail[0].iov_len; in svc_send()
1137 skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; in svc_defer()
1159 rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen; in svc_deferred_recv()
Dxprtsock.c335 .iov_len = vec->iov_len - base, in xs_send_kvec()
338 if (iov.iov_len != 0) in xs_send_kvec()
339 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); in xs_send_kvec()
405 if (base < xdr->head[0].iov_len || addr != NULL) { in xs_sendpages()
406 unsigned int len = xdr->head[0].iov_len - base; in xs_sendpages()
414 base -= xdr->head[0].iov_len; in xs_sendpages()
427 if (base >= xdr->tail[0].iov_len) in xs_sendpages()
524 req->rq_svec->iov_base, req->rq_svec->iov_len); in xs_local_send_request()
578 req->rq_svec->iov_len); in xs_udp_send_request()
671 req->rq_svec->iov_len); in xs_tcp_send_request()
Dsvcauth_unix.c735 if (argv->iov_len < 3*4) in svcauth_null_accept()
795 int len = argv->iov_len; in svcauth_unix_accept()
810 argv->iov_len -= slen*4; in svcauth_unix_accept()
Dclnt.c1060 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + in rpc_run_bc_task()
1061 xbufp->tail[0].iov_len; in rpc_run_bc_task()
1646 buf->head[0].iov_len = len; in rpc_xdr_buf_init()
1647 buf->tail[0].iov_len = 0; in rpc_xdr_buf_init()
/linux-4.1.27/net/sunrpc/xprtrdma/
Dsvc_rdma_sendto.c64 (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) { in map_xdr()
74 vec->sge[sge_no].iov_len = xdr->head[0].iov_len; in map_xdr()
86 vec->sge[sge_no].iov_len = sge_bytes; in map_xdr()
94 if (xdr->tail[0].iov_len) { in map_xdr()
96 vec->sge[sge_no].iov_len = xdr->tail[0].iov_len; in map_xdr()
103 xdr->head[0].iov_len, xdr->tail[0].iov_len); in map_xdr()
115 if (xdr_off < xdr->head[0].iov_len) { in dma_map_xdr()
120 xdr_off -= xdr->head[0].iov_len; in dma_map_xdr()
173 if (vec->sge[xdr_sge_no].iov_len > bc) in send_write()
175 bc -= vec->sge[xdr_sge_no].iov_len; in send_write()
[all …]
Drpc_rdma.c92 if (pos == 0 && xdrbuf->head[0].iov_len) { in rpcrdma_convert_iovs()
95 seg[n].mr_len = xdrbuf->head[0].iov_len; in rpcrdma_convert_iovs()
125 if (xdrbuf->tail[0].iov_len) { in rpcrdma_convert_iovs()
128 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) in rpcrdma_convert_iovs()
135 seg[n].mr_len = xdrbuf->tail[0].iov_len; in rpcrdma_convert_iovs()
207 pos = target->head[0].iov_len; in rpcrdma_create_chunks()
314 curlen = rqst->rq_svec[0].iov_len; in rpcrdma_inline_pullup()
329 if (rqst->rq_snd_buf.tail[0].iov_len) { in rpcrdma_inline_pullup()
330 curlen = rqst->rq_snd_buf.tail[0].iov_len; in rpcrdma_inline_pullup()
338 rqst->rq_svec[0].iov_len += curlen; in rpcrdma_inline_pullup()
[all …]
Dsvc_rdma_recvfrom.c74 rqstp->rq_arg.head[0].iov_len = in rdma_build_arg_xdr()
80 bc = byte_count - rqstp->rq_arg.head[0].iov_len; in rdma_build_arg_xdr()
115 rqstp->rq_arg.tail[0].iov_len = 0; in rdma_build_arg_xdr()
382 byte_count = head->arg.head[0].iov_len - position; in rdma_copy_tail()
412 byte_count = head->arg.head[0].iov_len - position; in rdma_copy_tail()
499 if (position && position < head->arg.head[0].iov_len) in rdma_read_chunks()
502 head->arg.head[0].iov_len = position; in rdma_read_chunks()
529 head->arg.head[0].iov_len = head->arg.len - in rdma_read_complete()
533 head->arg.head[0].iov_len = head->sge[0].length - in rdma_read_complete()
562 ret = rqstp->rq_arg.head[0].iov_len in rdma_read_complete()
[all …]
Dsvc_rdma_marshal.c185 rqstp->rq_arg.head[0].iov_len -= hdrlen; in svc_rdma_xdr_decode_req()
208 rqstp->rq_arg.head[0].iov_len -= hdr_len; in svc_rdma_xdr_decode_req()
231 rqstp->rq_arg.head[0].iov_len -= hdrlen; in svc_rdma_xdr_decode_deferred_req()
265 rqstp->rq_arg.head[0].iov_len -= hdrlen; in svc_rdma_xdr_decode_deferred_req()
/linux-4.1.27/include/linux/sunrpc/
Dsvc.h183 iov->iov_len -= sizeof(__be32); in svc_getnl()
189 __be32 *vp = iov->iov_base + iov->iov_len; in svc_putnl()
191 iov->iov_len += sizeof(__be32); in svc_putnl()
200 iov->iov_len -= sizeof(__be32); in svc_getu32()
208 iov->iov_len += sizeof(*vp); in svc_ungetu32()
213 __be32 *vp = iov->iov_base + iov->iov_len; in svc_putu32()
215 iov->iov_len += sizeof(__be32); in svc_putu32()
334 && cp <= (char*)vec->iov_base + vec->iov_len; in xdr_argsize_check()
343 vec->iov_len = cp - (char*)vec->iov_base; in xdr_ressize_check()
345 return vec->iov_len <= PAGE_SIZE; in xdr_ressize_check()
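The svc.h helpers above treat a kvec as a growable record: svc_putnl() stores a 32-bit word at iov_base + iov_len and bumps iov_len, while svc_getnl() consumes a word from the front and shrinks it. A userspace sketch of the same bookkeeping (struct and function names are stand-ins, and the caller is assumed to have left room in the buffer):

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct kvec_like { void *iov_base; size_t iov_len; };

    /* Append one network-order 32-bit word after the used region. */
    static void put_nl(struct kvec_like *iov, uint32_t val)
    {
        uint32_t be = htonl(val);

        memcpy((char *)iov->iov_base + iov->iov_len, &be, sizeof(be));
        iov->iov_len += sizeof(be);
    }

    /* Consume one word from the front, shrinking the described region. */
    static uint32_t get_nl(struct kvec_like *iov)
    {
        uint32_t be;

        memcpy(&be, iov->iov_base, sizeof(be));
        iov->iov_base = (char *)iov->iov_base + sizeof(be);
        iov->iov_len -= sizeof(be);
        return ntohl(be);
    }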
Dxdr.h146 return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base); in xdr_adjust_iovec()
/linux-4.1.27/fs/nfsd/
Dnfscache.c147 drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_reply_cache_free_locked()
320 size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, in nfsd_cache_csum()
322 size_t len = min(buf->head[0].iov_len, csum_len); in nfsd_cache_csum()
474 drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_cache_lookup()
553 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); in nfsd_cache_update()
576 cachv->iov_len = bufsize; in nfsd_cache_update()
603 if (vec->iov_len + data->iov_len > PAGE_SIZE) { in nfsd_cache_append()
605 data->iov_len); in nfsd_cache_append()
608 memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); in nfsd_cache_append()
609 vec->iov_len += data->iov_len; in nfsd_cache_append()
Dnfsxdr.c270 rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); in nfssvc_decode_readargs()
271 len -= rqstp->rq_vec[v].iov_len; in nfssvc_decode_readargs()
304 dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len in nfssvc_decode_writeargs()
319 rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; in nfssvc_decode_writeargs()
321 while (len > rqstp->rq_vec[v].iov_len) { in nfssvc_decode_writeargs()
322 len -= rqstp->rq_vec[v].iov_len; in nfssvc_decode_writeargs()
325 rqstp->rq_vec[v].iov_len = PAGE_SIZE; in nfssvc_decode_writeargs()
327 rqstp->rq_vec[v].iov_len = len; in nfssvc_decode_writeargs()
445 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); in nfssvc_encode_readlinkres()
464 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); in nfssvc_encode_readres()
Dnfs3xdr.c347 rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); in nfs3svc_decode_readargs()
348 len -= rqstp->rq_vec[v].iov_len; in nfs3svc_decode_readargs()
381 dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len in nfs3svc_decode_writeargs()
399 rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; in nfs3svc_decode_writeargs()
401 while (len > rqstp->rq_vec[v].iov_len) { in nfs3svc_decode_writeargs()
402 len -= rqstp->rq_vec[v].iov_len; in nfs3svc_decode_writeargs()
405 rqstp->rq_vec[v].iov_len = PAGE_SIZE; in nfs3svc_decode_writeargs()
407 rqstp->rq_vec[v].iov_len = len; in nfs3svc_decode_writeargs()
474 avail = vec->iov_len - (old - (char*)vec->iov_base); in nfs3svc_decode_symlinkargs()
685 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); in nfs3svc_encode_readlinkres()
[all …]
Dnfs4proc.c968 vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len); in fill_in_write_vector()
969 buflen -= vec[0].iov_len; in fill_in_write_vector()
973 vec[i].iov_len = min_t(int, PAGE_SIZE, buflen); in fill_in_write_vector()
974 buflen -= vec[i].iov_len; in fill_in_write_vector()
1594 xdr->p = head->iov_base + head->iov_len; in svcxdr_init_encode()
1597 buf->len = buf->head[0].iov_len; in svcxdr_init_encode()
1598 xdr->scratch.iov_len = 0; in svcxdr_init_encode()
Dnfssvc.c690 + rqstp->rq_res.head[0].iov_len; in nfsd_dispatch()
691 rqstp->rq_res.head[0].iov_len += sizeof(__be32); in nfsd_dispatch()
Dnfs4xdr.c1234 write->wr_head.iov_len = avail; in nfsd4_decode_write()
2782 xdr->scratch.iov_len = 0; in svcxdr_init_encode_from_buffer()
2785 buf->head[0].iov_len = 0; in svcxdr_init_encode_from_buffer()
3360 buf->tail[0].iov_len = 0; in nfsd4_encode_splice_read()
3368 buf->tail[0].iov_len = pad; in nfsd4_encode_splice_read()
3403 resp->rqstp->rq_vec[v].iov_len = thislen; in nfsd4_encode_readv()
3412 resp->rqstp->rq_vec[v].iov_len = thislen; in nfsd4_encode_readv()
3560 resp->xdr.buf->head[0].iov_len = ((char *)resp->xdr.p) in nfsd4_encode_readdir()
4429 if (rqstp->rq_arg.head[0].iov_len % 4) { in nfs4svc_decode_compoundargs()
4436 args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len; in nfs4svc_decode_compoundargs()
[all …]
/linux-4.1.27/net/sunrpc/auth_gss/
Dsvcauth_gss.c624 if (argv->iov_len < 4) in svc_safe_getnetobj()
628 if (argv->iov_len < l) in svc_safe_getnetobj()
632 argv->iov_len -= l; in svc_safe_getnetobj()
641 if (resv->iov_len + 4 > PAGE_SIZE) in svc_safe_putnetobj()
644 p = resv->iov_base + resv->iov_len; in svc_safe_putnetobj()
645 resv->iov_len += round_up_to_quad(o->len); in svc_safe_putnetobj()
646 if (resv->iov_len > PAGE_SIZE) in svc_safe_putnetobj()
671 iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart; in gss_verify_header()
675 if (argv->iov_len < 4) in gss_verify_header()
710 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; in gss_write_null_verf()
[all …]
Dgss_krb5_wrap.c55 if (buf->page_len || buf->tail[0].iov_len) in gss_krb5_add_padding()
59 p = iov->iov_base + iov->iov_len; in gss_krb5_add_padding()
60 iov->iov_len += padding; in gss_krb5_add_padding()
72 if (len <= buf->head[0].iov_len) { in gss_krb5_remove_padding()
74 if (pad > buf->head[0].iov_len) in gss_krb5_remove_padding()
76 buf->head[0].iov_len -= pad; in gss_krb5_remove_padding()
79 len -= buf->head[0].iov_len; in gss_krb5_remove_padding()
91 BUG_ON(len > buf->tail[0].iov_len); in gss_krb5_remove_padding()
378 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; in gss_unwrap_kerberos_v1()
380 buf->head[0].iov_len -= (data_start - orig_start); in gss_unwrap_kerberos_v1()
[all …]
Dgss_krb5_crypto.c414 page_pos = desc->pos - outbuf->head[0].iov_len; in encryptor()
588 memmove(p + shiftlen, p, buf->head[0].iov_len - base); in xdr_extend_head()
590 buf->head[0].iov_len += shiftlen; in xdr_extend_head()
679 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; in gss_krb5_aes_encrypt()
682 + buf->head[0].iov_len; in gss_krb5_aes_encrypt()
683 buf->tail[0].iov_len = 0; in gss_krb5_aes_encrypt()
689 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; in gss_krb5_aes_encrypt()
694 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; in gss_krb5_aes_encrypt()
750 buf->tail[0].iov_len += kctx->gk5e->cksumlength; in gss_krb5_aes_encrypt()
Dauth_gss.c1510 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; in gss_marshal()
1626 iov.iov_len = sizeof(seq); in gss_validate()
1690 if (snd_buf->page_len || snd_buf->tail[0].iov_len) in gss_wrap_req_integ()
1694 p = iov->iov_base + iov->iov_len; in gss_wrap_req_integ()
1706 iov->iov_len += offset; in gss_wrap_req_integ()
1791 if (snd_buf->page_len || snd_buf->tail[0].iov_len) { in gss_wrap_req_priv()
1793 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); in gss_wrap_req_priv()
1809 if (snd_buf->page_len || snd_buf->tail[0].iov_len) in gss_wrap_req_priv()
1813 p = iov->iov_base + iov->iov_len; in gss_wrap_req_priv()
1816 iov->iov_len += pad; in gss_wrap_req_priv()
[all …]
/linux-4.1.27/net/rxrpc/
Dar-ack.c925 iov[0].iov_len = sizeof(hdr); in rxrpc_process_call()
980 iov[1].iov_len = sizeof(data); in rxrpc_process_call()
1004 iov[1].iov_len = sizeof(ack); in rxrpc_process_call()
1006 iov[2].iov_len = 3; in rxrpc_process_call()
1008 iov[3].iov_len = sizeof(ackinfo); in rxrpc_process_call()
1128 iov[1].iov_len = sizeof(ack); in rxrpc_process_call()
1130 iov[2].iov_len = ack.nAcks; in rxrpc_process_call()
1132 iov[3].iov_len = 3; in rxrpc_process_call()
1134 iov[4].iov_len = sizeof(ackinfo); in rxrpc_process_call()
1247 len = iov[0].iov_len; in rxrpc_process_call()
[all …]
Dar-connevent.c106 iov[0].iov_len = sizeof(hdr); in rxrpc_abort_connection()
108 iov[1].iov_len = sizeof(word); in rxrpc_abort_connection()
110 len = iov[0].iov_len + iov[1].iov_len; in rxrpc_abort_connection()
352 iov[0].iov_len = sizeof(hdr); in rxrpc_reject_packets()
354 iov[1].iov_len = sizeof(code); in rxrpc_reject_packets()
Dar-local.c355 iov[0].iov_len = sizeof(*hdr); in rxrpc_send_version_request()
357 iov[1].iov_len = sizeof(rxrpc_version_string); in rxrpc_send_version_request()
359 len = iov[0].iov_len + iov[1].iov_len; in rxrpc_send_version_request()
Drxkad.c607 iov[0].iov_len = sizeof(hdr); in rxkad_issue_challenge()
609 iov[1].iov_len = sizeof(challenge); in rxkad_issue_challenge()
611 len = iov[0].iov_len + iov[1].iov_len; in rxkad_issue_challenge()
655 iov[0].iov_len = sizeof(*hdr); in rxkad_send_response()
657 iov[1].iov_len = sizeof(*resp); in rxkad_send_response()
659 iov[2].iov_len = s2->ticket_len; in rxkad_send_response()
661 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; in rxkad_send_response()
Dar-output.c341 iov[0].iov_len = skb->len; in rxrpc_send_packet()
360 iov[0].iov_len); in rxrpc_send_packet()
380 iov[0].iov_len); in rxrpc_send_packet()
Dar-accept.c52 iov[0].iov_len = sizeof(*hdr); in rxrpc_busy()
54 len = iov[0].iov_len; in rxrpc_busy()
/linux-4.1.27/include/linux/
Duio.h19 size_t iov_len; member
53 ret += iov[seg].iov_len; in iov_length()
61 .iov_len = min(iter->count, in iov_iter_iovec()
62 iter->iov->iov_len - iter->iov_offset), in iov_iter_iovec()
71 iov_iter_advance(&(iter), (iov).iov_len))
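For context, iov_len is the per-segment byte count in struct iovec (and its kernel twin struct kvec); the uio.h hits above sum it in iov_length() and clamp it in iov_iter_iovec(). A userspace sketch of the summation, using the struct iovec from <sys/uio.h>:

    #include <stddef.h>
    #include <sys/uio.h>

    static size_t iov_total(const struct iovec *iov, unsigned long nr_segs)
    {
        size_t ret = 0;
        unsigned long seg;

        for (seg = 0; seg < nr_segs; seg++)
            ret += iov[seg].iov_len;    /* bytes described by this segment */
        return ret;
    }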
Dvringh.h129 iov->iov[iov->i].iov_len += iov->consumed; in vringh_iov_reset()
191 kiov->iov[kiov->i].iov_len += kiov->consumed; in vringh_kiov_reset()
Dcompat.h169 compat_size_t iov_len; member
/linux-4.1.27/fs/cifs/
Dsmb2transport.c175 if (iov[i].iov_len == 0) in smb2_calc_signature()
186 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ in smb2_calc_signature()
191 iov[i].iov_base + 4, iov[i].iov_len - 4); in smb2_calc_signature()
196 iov[i].iov_base, iov[i].iov_len); in smb2_calc_signature()
211 p_iov.iov_base, p_iov.iov_len); in smb2_calc_signature()
346 if (iov[i].iov_len == 0) in smb3_calc_signature()
357 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ in smb3_calc_signature()
362 iov[i].iov_base + 4, iov[i].iov_len - 4); in smb3_calc_signature()
367 iov[i].iov_base, iov[i].iov_len); in smb3_calc_signature()
382 p_iov.iov_base, p_iov.iov_len); in smb3_calc_signature()
[all …]
Dtransport.c158 remaining += iov[i].iov_len; in smb_send_kvec()
222 if (iov[i].iov_len) { in smb_send_kvec()
223 if (rc > iov[i].iov_len) { in smb_send_kvec()
224 rc -= iov[i].iov_len; in smb_send_kvec()
225 iov[i].iov_len = 0; in smb_send_kvec()
228 iov[i].iov_len -= rc; in smb_send_kvec()
268 iov->iov_len = rqst->rq_tailsz; in cifs_rqst_page_to_kvec()
270 iov->iov_len = rqst->rq_pagesz; in cifs_rqst_page_to_kvec()
282 buflen += iov[i].iov_len; in rqst_len()
318 dump_smb(iov[0].iov_base, iov[0].iov_len); in smb_send_rqst()
[all …]
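The cifs transport.c hits show the usual bookkeeping after a short socket send: walk the kvec array, zeroing iov_len for segments that went out completely and trimming the first partially sent one. A standalone sketch of that adjustment (illustrative, not the kernel's smb_send_kvec()):

    #include <stddef.h>
    #include <sys/uio.h>

    /* After a short write of 'sent' bytes, advance the array so the next
     * writev()/sendmsg() resumes at the first unsent byte. */
    static void iov_advance(struct iovec *iov, int iovcnt, size_t sent)
    {
        int i;

        for (i = 0; i < iovcnt && sent > 0; i++) {
            if (sent >= iov[i].iov_len) {
                sent -= iov[i].iov_len;
                iov[i].iov_len = 0;                    /* fully sent */
            } else {
                iov[i].iov_base = (char *)iov[i].iov_base + sent;
                iov[i].iov_len -= sent;                /* partially sent */
                sent = 0;
            }
        }
    }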
Dsess.c564 sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; in sess_alloc_buffer()
583 sess_data->iov[0].iov_len = 0; in sess_alloc_buffer()
636 count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; in sess_sendreceive()
711 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_lanman()
812 if (sess_data->iov[0].iov_len % 2) { in sess_auth_ntlm()
822 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_ntlm()
920 if (sess_data->iov[0].iov_len % 2) { in sess_auth_ntlmv2()
930 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_ntlmv2()
1039 sess_data->iov[1].iov_len = msg->secblob_len; in sess_auth_kerberos()
1040 pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); in sess_auth_kerberos()
[all …]
Dsmb2pdu.c373 iov[0].iov_len = get_rfc1002_length(req) + 4; in SMB2_negotiate()
591 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; in SMB2_sess_setup()
628 iov[1].iov_len = blob_length; in SMB2_sess_setup()
657 iov[1].iov_len = blob_length; in SMB2_sess_setup()
686 iov[1].iov_len = blob_length; in SMB2_sess_setup()
892 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; in SMB2_tcon()
899 iov[1].iov_len = unc_path_len; in SMB2_tcon()
1078 iov[num].iov_len = server->vals->create_lease_size; in add_lease_context()
1083 iov[num - 1].iov_len); in add_lease_context()
1106 iov[num].iov_len = sizeof(struct create_durable); in add_durable_context()
[all …]
Dcifsencrypt.c108 if (iov[i].iov_len == 0) in cifs_calc_signature()
117 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ in cifs_calc_signature()
121 iov[i].iov_base + 4, iov[i].iov_len - 4); in cifs_calc_signature()
125 iov[i].iov_base, iov[i].iov_len); in cifs_calc_signature()
140 p_iov.iov_base, p_iov.iov_len); in cifs_calc_signature()
203 iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4; in cifs_sign_smb()
Dfile.c1682 iov[1].iov_len = len; in cifs_write()
2868 iov.iov_len = PAGE_SIZE; in cifs_uncached_read_into_pages()
2870 i, iov.iov_base, iov.iov_len); in cifs_uncached_read_into_pages()
2875 iov.iov_len = len; in cifs_uncached_read_into_pages()
2877 i, iov.iov_base, iov.iov_len); in cifs_uncached_read_into_pages()
2889 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); in cifs_uncached_read_into_pages()
3321 iov.iov_len = PAGE_CACHE_SIZE; in cifs_readpages_read_into_pages()
3323 i, page->index, iov.iov_base, iov.iov_len); in cifs_readpages_read_into_pages()
3328 iov.iov_len = len; in cifs_readpages_read_into_pages()
3330 i, page->index, iov.iov_base, iov.iov_len); in cifs_readpages_read_into_pages()
[all …]
Dcifssmb.c727 iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; in CIFSSMBEcho()
1450 rdata->iov.iov_len = len; in cifs_readv_receive()
1505 rdata->iov.iov_len = len; in cifs_readv_receive()
1514 rdata->iov.iov_len = server->total_read; in cifs_readv_receive()
1516 rdata->iov.iov_base, rdata->iov.iov_len); in cifs_readv_receive()
1645 rdata->iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; in cifs_async_readv()
1719 iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; in CIFSSMBRead()
2133 iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; in cifs_async_writev()
2158 iov.iov_len += 4; /* pad bigger by four bytes */ in cifs_async_writev()
2243 iov[0].iov_len = smb_hdr_len + 4; in CIFSSMBWrite2()
[all …]
Dconnect.c508 while (bytes || !iov->iov_len) { in kvec_array_init()
509 int copy = min(bytes, iov->iov_len); in kvec_array_init()
513 if (iov->iov_len == base) { in kvec_array_init()
521 new->iov_len -= base; in kvec_array_init()
609 iov.iov_len = to_read; in cifs_read_from_socket()
Dlink.c520 iov[1].iov_len = CIFS_MF_SYMLINK_FILE_SIZE; in smb3_create_mf_symlink()
Ddir.c680 iov[1].iov_len = sizeof(struct win_dev); in cifs_mknod()
/linux-4.1.27/mm/
Dprocess_vm_access.c154 ssize_t iov_len; in process_vm_rw_core() local
162 iov_len = rvec[i].iov_len; in process_vm_rw_core()
163 if (iov_len > 0) { in process_vm_rw_core()
165 + iov_len) in process_vm_rw_core()
211 (unsigned long)rvec[i].iov_base, rvec[i].iov_len, in process_vm_rw_core()
Dswap.c385 if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE)) in get_kernel_pages()
411 .iov_len = PAGE_SIZE in get_kernel_page()
/linux-4.1.27/drivers/hv/
Dchannel.c606 bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor); in vmbus_sendpacket_ctl()
608 bufferlist[1].iov_len = bufferlen; in vmbus_sendpacket_ctl()
610 bufferlist[2].iov_len = (packetlen_aligned - packetlen); in vmbus_sendpacket_ctl()
709 bufferlist[0].iov_len = descsize; in vmbus_sendpacket_pagebuffer_ctl()
711 bufferlist[1].iov_len = bufferlen; in vmbus_sendpacket_pagebuffer_ctl()
713 bufferlist[2].iov_len = (packetlen_aligned - packetlen); in vmbus_sendpacket_pagebuffer_ctl()
782 bufferlist[0].iov_len = desc_size; in vmbus_sendpacket_mpb_desc()
784 bufferlist[1].iov_len = bufferlen; in vmbus_sendpacket_mpb_desc()
786 bufferlist[2].iov_len = (packetlen_aligned - packetlen); in vmbus_sendpacket_mpb_desc()
845 bufferlist[0].iov_len = descsize; in vmbus_sendpacket_multipagebuffer()
[all …]
Dring_buffer.c344 totalbytes_towrite += kv_list[i].iov_len; in hv_ringbuffer_write()
373 kv_list[i].iov_len); in hv_ringbuffer_write()
/linux-4.1.27/kernel/
Dptrace.c799 if (!regset || (kiov->iov_len % regset->size) != 0) in ptrace_regset()
803 kiov->iov_len = min(kiov->iov_len, in ptrace_regset()
808 kiov->iov_len, kiov->iov_base); in ptrace_regset()
811 kiov->iov_len, kiov->iov_base); in ptrace_regset()
1020 __get_user(kiov.iov_len, &uiov->iov_len)) in ptrace_request()
1025 ret = __put_user(kiov.iov_len, &uiov->iov_len); in ptrace_request()
1181 __get_user(len, &uiov->iov_len)) in compat_ptrace_request()
1185 kiov.iov_len = len; in compat_ptrace_request()
1189 ret = __put_user(kiov.iov_len, &uiov->iov_len); in compat_ptrace_request()
/linux-4.1.27/drivers/usb/usbip/
Dvhci_tx.c92 iov[0].iov_len = sizeof(pdu_header); in vhci_send_cmd_submit()
98 iov[1].iov_len = urb->transfer_buffer_length; in vhci_send_cmd_submit()
114 iov[2].iov_len = len; in vhci_send_cmd_submit()
184 iov[0].iov_len = sizeof(pdu_header); in vhci_send_cmd_unlink()
Dstub_tx.c198 iov[iovnum].iov_len = sizeof(pdu_header); in stub_send_ret_submit()
207 iov[iovnum].iov_len = urb->actual_length; in stub_send_ret_submit()
225 iov[iovnum].iov_len = in stub_send_ret_submit()
256 iov[iovnum].iov_len = len; in stub_send_ret_submit()
333 iov[0].iov_len = sizeof(pdu_header); in stub_send_ret_unlink()
Dusbip_common.c347 iov.iov_len = size; in usbip_recv()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/
Dsocklnd_lib-linux.c107 nob += scratchiov[i].iov_len; in ksocknal_lib_send_iov()
172 nob += scratchiov[i].iov_len = kiov[i].kiov_len; in ksocknal_lib_send_kiov()
230 nob += scratchiov[i].iov_len; in ksocknal_lib_recv_iov()
248 fragnob = iov[i].iov_len; in ksocknal_lib_recv_iov()
301 iov->iov_len = nob; in ksocknal_lib_kiov_vmap()
339 nob = scratchiov[0].iov_len; in ksocknal_lib_recv_kiov()
344 nob += scratchiov[i].iov_len = kiov[i].kiov_len; in ksocknal_lib_recv_kiov()
400 tx->tx_iov[0].iov_len); in ksocknal_lib_csum_tx()
414 tx->tx_iov[i].iov_len); in ksocknal_lib_csum_tx()
Dsocklnd_cb.c133 if (nob < (int) iov->iov_len) { in ksocknal_send_iov()
135 iov->iov_len -= nob; in ksocknal_send_iov()
139 nob -= iov->iov_len; in ksocknal_send_iov()
282 if (nob < (int)iov->iov_len) { in ksocknal_recv_iov()
283 iov->iov_len -= nob; in ksocknal_recv_iov()
288 nob -= iov->iov_len; in ksocknal_recv_iov()
1055 conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u); in ksocknal_new_packet()
1066 conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); in ksocknal_new_packet()
1093 conn->ksnc_rx_iov[niov].iov_len = nob; in ksocknal_new_packet()
1217 conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t); in ksocknal_process_receive()
Dsocklnd_proto.c721 tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); in ksocknal_pack_msg_v1()
735 tx->tx_iov[0].iov_len = sizeof(ksock_msg_t); in ksocknal_pack_msg_v2()
740 tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); in ksocknal_pack_msg_v2()
/linux-4.1.27/fs/ncpfs/
Dsock.c54 vec.iov_len = len; in _send()
246 while (iov->iov_len <= result) { in __ncptcp_try_send()
247 result -= iov->iov_len; in __ncptcp_try_send()
252 iov->iov_len -= result; in __ncptcp_try_send()
274 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, in ncpdgram_start_request()
278 req->tx_ciov[1].iov_len = signlen; in ncpdgram_start_request()
302 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, in ncptcp_start_request()
310 req->tx_iov[0].iov_len = signlen; in ncptcp_start_request()
322 memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len); in __ncp_start_request()
374 iov[0].iov_len = 8; in info_server()
[all …]
/linux-4.1.27/net/ceph/
Dauth_x.c241 (int)th->ticket_blob->vec.iov_len); in process_one_ticket()
292 (th->ticket_blob ? th->ticket_blob->vec.iov_len : 0); in ceph_x_build_authorizer()
328 th->ticket_blob->vec.iov_len); in ceph_x_build_authorizer()
335 end = au->buf->vec.iov_base + au->buf->vec.iov_len; in ceph_x_build_authorizer()
345 au->buf->vec.iov_len = p - au->buf->vec.iov_base; in ceph_x_build_authorizer()
347 (int)au->buf->vec.iov_len); in ceph_x_build_authorizer()
348 BUG_ON(au->buf->vec.iov_len > maxlen); in ceph_x_build_authorizer()
365 u32 len = th->ticket_blob->vec.iov_len; in ceph_x_encode_ticket()
478 xi->auth_authorizer.buf->vec.iov_len); in ceph_x_build_request()
568 auth->authorizer_buf_len = au->buf->vec.iov_len; in ceph_x_create_authorizer()
Dmessenger.c599 iov.iov_len = size; in ceph_tcp_sendpage()
794 con->out_kvec[index].iov_len = size; in con_out_kvec_add()
811 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len; in con_out_kvec_skip()
1241 con->out_kvec[v].iov_len = sizeof(m->footer); in prepare_write_message_footer()
1245 con->out_kvec[v].iov_len = sizeof(m->old_footer); in prepare_write_message_footer()
1297 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); in prepare_write_message()
1302 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); in prepare_write_message()
1305 con_out_kvec_add(con, m->middle->vec.iov_len, in prepare_write_message()
1314 crc = crc32c(0, m->front.iov_base, m->front.iov_len); in prepare_write_message()
1318 m->middle->vec.iov_len); in prepare_write_message()
[all …]
Dmon_client.c107 monc->m_auth->front.iov_len = len; in __send_prepared_auth_request()
229 msg->front.iov_len = p - msg->front.iov_base; in __send_subscribe()
230 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in __send_subscribe()
244 if (msg->front.iov_len < sizeof(*h)) in handle_subscribe_ack()
369 end = p + msg->front.iov_len; in ceph_monc_handle_map()
553 if (msg->front.iov_len != sizeof(*reply)) in handle_statfs_reply()
901 msg->front.iov_len, in handle_auth_reply()
Dbuffer.c27 b->vec.iov_len = len; in ceph_buffer_new()
Dmsgpool.c78 msg->front.iov_len = pool->front_len; in ceph_msgpool_put()
Dosd_client.c427 memset(msg->front.iov_base, 0, msg->front.iov_len); in ceph_osdc_alloc_request()
1765 end = p + msg->front.iov_len; in handle_reply()
1929 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); in handle_reply()
2071 end = p + msg->front.iov_len; in ceph_osdc_handle_map()
2345 end = p + msg->front.iov_len; in handle_watch_notify()
2474 BUG_ON(p > msg->front.iov_base + msg->front.iov_len); in ceph_osdc_build_request()
2476 msg->front.iov_len = msg_size; in ceph_osdc_build_request()
/linux-4.1.27/drivers/xen/xenbus/
Dxenbus_xs.c282 msg.len += iovec[i].iov_len; in xs_talkv()
293 err = xb_write(iovec[i].iov_base, iovec[i].iov_len); in xs_talkv()
331 iovec.iov_len = strlen(string) + 1; in xs_single()
459 iovec[0].iov_len = strlen(path) + 1; in xenbus_write()
461 iovec[1].iov_len = strlen(string); in xenbus_write()
625 iov[0].iov_len = strlen(path) + 1; in xs_watch()
627 iov[1].iov_len = strlen(token) + 1; in xs_watch()
638 iov[0].iov_len = strlen(path) + 1; in xs_unwatch()
640 iov[1].iov_len = strlen(token) + 1; in xs_unwatch()
/linux-4.1.27/drivers/target/iscsi/
Discsi_target.c758 iov[i].iov_len = cur_len; in iscsit_map_iovec()
1411 iov[iov_count++].iov_len = padding; in iscsit_get_dataout()
1418 iov[iov_count++].iov_len = ISCSI_CRC_LEN; in iscsit_get_dataout()
1669 iov[niov++].iov_len = payload_length; in iscsit_handle_nop_out()
1676 iov[niov++].iov_len = padding; in iscsit_handle_nop_out()
1681 iov[niov++].iov_len = ISCSI_CRC_LEN; in iscsit_handle_nop_out()
2090 iov[niov++].iov_len = payload_length; in iscsit_handle_text_cmd()
2095 iov[niov++].iov_len = padding; in iscsit_handle_text_cmd()
2102 iov[niov++].iov_len = ISCSI_CRC_LEN; in iscsit_handle_text_cmd()
2428 iov[iov_count++].iov_len = padding; in iscsit_handle_immediate_data()
[all …]
Discsi_target_erl1.c72 iov.iov_len = size; in iscsit_dump_data_payload()
89 iov.iov_len = padding; in iscsit_dump_data_payload()
102 iov.iov_len = ISCSI_CRC_LEN; in iscsit_dump_data_payload()
Discsi_target_util.c1197 iov.iov_len = tx_hdr_size; in iscsit_fe_sendpage_sg()
1354 int ret, iov_len; in iscsit_do_tx_data() local
1369 iov_len = count->iov_count; in iscsit_do_tx_data()
1371 ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, in iscsit_do_tx_data()
Discsi_target_parameters.c34 iov.iov_len = length; in iscsi_login_rx_data()
66 iov[0].iov_len = ISCSI_HDR_LEN; in iscsi_login_tx_data()
70 iov[1].iov_len = text_length; in iscsi_login_tx_data()
/linux-4.1.27/Documentation/mic/mpssd/
Dmpssd.c358 sum += copy->iov[i].iov_len; in sum_iovec_len()
383 copy->iov[i].iov_base, copy->iov[i].iov_len); in disp_iovec()
397 copy->iov[1].iov_len = len - sizeof(struct virtio_net_hdr); in txrx_prepare()
399 copy->iov[0].iov_len = len; in txrx_prepare()
526 { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, in virtio_net()
527 { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, in virtio_net()
528 { { .iov_base = vnet_hdr[1], .iov_len = sizeof(vnet_hdr[1]) }, in virtio_net()
529 { .iov_base = vnet_buf[1], .iov_len = sizeof(vnet_buf[1]) } }, in virtio_net()
629 iov0[1].iov_len = MAX_NET_PKT_SIZE; in virtio_net()
668 iov1[1].iov_len = copy.out_len - in virtio_net()
[all …]
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
Dlib-move.c166 nob += (iov++)->iov_len; in lnet_iov_nob()
185 while (doffset >= diov->iov_len) { in lnet_copy_iov2iov()
186 doffset -= diov->iov_len; in lnet_copy_iov2iov()
194 while (soffset >= siov->iov_len) { in lnet_copy_iov2iov()
195 soffset -= siov->iov_len; in lnet_copy_iov2iov()
204 this_nob = min(diov->iov_len - doffset, in lnet_copy_iov2iov()
205 siov->iov_len - soffset); in lnet_copy_iov2iov()
212 if (diov->iov_len > doffset + this_nob) { in lnet_copy_iov2iov()
220 if (siov->iov_len > soffset + this_nob) { in lnet_copy_iov2iov()
246 while (offset >= src->iov_len) { /* skip initial frags */ in lnet_extract_iov()
[all …]
Dlib-md.c118 if (lmd->md_iov.iov[i].iov_len <= 0) in lnet_md_build()
121 total_length += lmd->md_iov.iov[i].iov_len; in lnet_md_build()
155 lmd->md_iov.iov[0].iov_len = umd->length; in lnet_md_build()
/linux-4.1.27/fs/jffs2/
Dwrite.c78 vecs[0].iov_len = sizeof(*ri); in jffs2_write_dnode()
80 vecs[1].iov_len = datalen; in jffs2_write_dnode()
98 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); in jffs2_write_dnode()
238 vecs[0].iov_len = sizeof(*rd); in jffs2_write_dirent()
240 vecs[1].iov_len = namelen; in jffs2_write_dirent()
256 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); in jffs2_write_dirent()
Dwritev.c43 vecs[0].iov_len = len; in jffs2_flash_direct_write()
Dxattr.c297 vecs[0].iov_len = sizeof(rx); in save_xattr_datum()
299 vecs[1].iov_len = xd->name_len + 1 + xd->value_len; in save_xattr_datum()
300 totlen = vecs[0].iov_len + vecs[1].iov_len; in save_xattr_datum()
314 rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len)); in save_xattr_datum()
Dsummary.c810 vecs[0].iov_len = sizeof(isum); in jffs2_sum_write_data()
812 vecs[1].iov_len = datasize; in jffs2_sum_write_data()
Derase.c463 vecs[0].iov_len = sizeof(marker); in jffs2_mark_erased_block()
Dwbuf.c860 int vlen = invecs[invec].iov_len; in jffs2_flash_writev()
950 vecs[0].iov_len = len; in jffs2_flash_write()
/linux-4.1.27/fs/9p/
Dxattr.c31 struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size}; in v9fs_fid_xattr_get()
108 struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; in v9fs_fid_xattr_set()
Dvfs_dir.c130 kvec.iov_len = buflen; in v9fs_dir_readdir()
/linux-4.1.27/drivers/vhost/
Dvringh.c82 partlen = min(iov->iov[iov->i].iov_len, len); in vringh_iov_xfer()
90 iov->iov[iov->i].iov_len -= partlen; in vringh_iov_xfer()
93 if (!iov->iov[iov->i].iov_len) { in vringh_iov_xfer()
95 iov->iov[iov->i].iov_len = iov->consumed; in vringh_iov_xfer()
364 iov->iov[iov->used].iov_len = len; in __vringh_iov()
679 BUILD_BUG_ON(offsetof(struct iovec, iov_len) != in vringh_getdesc_user()
680 offsetof(struct kvec, iov_len)); in vringh_getdesc_user()
683 BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len) in vringh_getdesc_user()
684 != sizeof(((struct kvec *)NULL)->iov_len)); in vringh_getdesc_user()
Dscsi.c226 static int iov_num_pages(void __user *iov_base, size_t iov_len) in iov_num_pages() argument
228 return (PAGE_ALIGN((unsigned long)iov_base + iov_len) - in iov_num_pages()
643 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { in vhost_scsi_do_evt_work()
645 vq->iov[out].iov_len); in vhost_scsi_do_evt_work()
869 size_t len = iter->iov[i].iov_len - off; in vhost_scsi_iov_to_sgl()
1055 if (unlikely(vq->iov[out].iov_len < rsp_size)) { in vhost_scsi_handle_vq()
1057 " size, got %zu bytes\n", vq->iov[out].iov_len); in vhost_scsi_handle_vq()
Dvhost.c1088 _iov->iov_len = min((u64)len - s, size); in translate_desc()
/linux-4.1.27/drivers/infiniband/hw/ipath/
Dipath_user_sdma.c198 iov[i].iov_base, iov[i].iov_len); in ipath_user_sdma_coalesce()
204 mpage += iov[i].iov_len; in ipath_user_sdma_coalesce()
205 len += iov[i].iov_len; in ipath_user_sdma_coalesce()
232 const unsigned long len = iov->iov_len; in ipath_user_sdma_num_pages()
335 addr, iov[idx].iov_len, in ipath_user_sdma_pin_pkt()
420 len = iov[idx].iov_len; in ipath_user_sdma_queue_pkts()
482 const size_t slen = iov[idx].iov_len; in ipath_user_sdma_queue_pkts()
Dipath_common.h525 __u64 iov_len; member
/linux-4.1.27/include/uapi/linux/
Duio.h19 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ member
/linux-4.1.27/drivers/misc/mic/host/
Dmic_virtio.c198 total += iov->iov[i].iov_len; in mic_vringh_iov_consumed()
217 partlen = min(iov->iov[iov->i].iov_len, len); in mic_vringh_copy()
221 iov->iov[iov->i].iov_len, in mic_vringh_copy()
226 iov->iov[iov->i].iov_len, in mic_vringh_copy()
237 iov->iov[iov->i].iov_len -= partlen; in mic_vringh_copy()
239 if (!iov->iov[iov->i].iov_len) { in mic_vringh_copy()
241 iov->iov[iov->i].iov_len = iov->consumed; in mic_vringh_copy()
291 len = iov.iov_len; in _mic_virtio_copy()
/linux-4.1.27/net/rds/
Dtcp_send.c70 .iov_len = len, in rds_tcp_sendmsg()
76 return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); in rds_tcp_sendmsg()
/linux-4.1.27/fs/
Dread_write.c413 struct iovec iov = { .iov_base = buf, .iov_len = len }; in new_sync_read()
469 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len }; in new_sync_write()
644 if (len + iov->iov_len >= to) { in iov_shorten()
645 iov->iov_len = to - len; in iov_shorten()
648 len += iov->iov_len; in iov_shorten()
680 nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos); in do_loop_readv_writev()
688 if (nr != iovec.iov_len) in do_loop_readv_writev()
750 ssize_t len = (ssize_t)iov[seg].iov_len; in rw_copy_check_uvector()
765 iov[seg].iov_len = len; in rw_copy_check_uvector()
Dsplice.c653 vec[i].iov_len = this_len; in default_file_splice_read()
672 this_len = min_t(size_t, vec[i].iov_len, res); in default_file_splice_read()
1456 len = entry.iov_len; in get_iovec_page_array()
1671 get_user(v.iov_len, &iov32[i].iov_len) || in COMPAT_SYSCALL_DEFINE4()
1673 put_user(v.iov_len, &iov[i].iov_len)) in COMPAT_SYSCALL_DEFINE4()
Dcompat_ioctl.c255 compat_uint_t iov_len; member
268 get_user(len, &iov32[i].iov_len) || in sg_build_iovec()
270 put_user(len, &iov[i].iov_len)) in sg_build_iovec()
Dcompat.c593 if (__get_user(len, &uvector->iov_len) || in compat_rw_copy_check_uvector()
609 iov->iov_len = (compat_size_t) len; in compat_rw_copy_check_uvector()
/linux-4.1.27/drivers/mtd/
Dmtdconcat.c170 total_len += vecs[i].iov_len; in concat_writev()
199 if (size <= vecs_copy[entry_high].iov_len) in concat_writev()
201 size -= vecs_copy[entry_high++].iov_len; in concat_writev()
204 old_iov_len = vecs_copy[entry_high].iov_len; in concat_writev()
205 vecs_copy[entry_high].iov_len = size; in concat_writev()
210 vecs_copy[entry_high].iov_len = old_iov_len - size; in concat_writev()
Dmtdcore.c1121 if (!vecs[i].iov_len) in default_mtd_writev()
1123 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, in default_mtd_writev()
1126 if (ret || thislen != vecs[i].iov_len) in default_mtd_writev()
1128 to += vecs[i].iov_len; in default_mtd_writev()
/linux-4.1.27/net/tipc/
Dserver.c262 iov.iov_len = s->max_rcvbuf_size; in tipc_receive_from_sock()
264 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len, in tipc_receive_from_sock()
418 entry->iov.iov_len = len; in tipc_alloc_entry()
508 e->iov.iov_len); in tipc_send_to_sock()
Dsubscr.c74 msg_sect.iov_len = sizeof(struct tipc_event); in subscr_send_event()
81 msg_sect.iov_base, msg_sect.iov_len); in subscr_send_event()
/linux-4.1.27/drivers/infiniband/hw/qib/
Dqib_user_sdma.c585 iov[i].iov_base, iov[i].iov_len); in qib_user_sdma_coalesce()
591 mpage += iov[i].iov_len; in qib_user_sdma_coalesce()
592 len += iov[i].iov_len; in qib_user_sdma_coalesce()
612 const unsigned long len = iov->iov_len; in qib_user_sdma_num_pages()
733 iov[idx].iov_len, npages); in qib_user_sdma_pin_pkt()
836 len = iov[idx].iov_len; in qib_user_sdma_queue_pkts()
881 const size_t slen = iov[idx].iov_len; in qib_user_sdma_queue_pkts()
925 tidsmsize = iov[idx].iov_len; in qib_user_sdma_queue_pkts()
Dqib_common.h490 __u64 iov_len; member
/linux-4.1.27/drivers/mtd/lpddr/
Dlpddr_cmds.c427 if (n > vec->iov_len - vec_seek) in do_write_buffer()
428 n = vec->iov_len - vec_seek; in do_write_buffer()
447 if (vec_seek == vec->iov_len) { in do_write_buffer()
625 vec.iov_len = len; in lpddr_write_buffers()
643 len += vecs[i].iov_len; in lpddr_writev()
/linux-4.1.27/tools/hv/
Dhv_vss_daemon.c159 iov[0].iov_len = sizeof(nlh); in netlink_send()
162 iov[1].iov_len = size; in netlink_send()
Dhv_kvp_daemon.c1404 iov[0].iov_len = sizeof(nlh); in netlink_send()
1407 iov[1].iov_len = size; in netlink_send()
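The Hyper-V daemons above send a netlink header and a payload as two segments of one message, a common gather-write use of iov_len. A userspace sketch of that pattern (fd, hdr, and payload are the caller's; error handling omitted):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static ssize_t send_two_part(int fd, void *hdr, size_t hdr_len,
                                 void *payload, size_t payload_len)
    {
        struct iovec iov[2];
        struct msghdr msg;

        iov[0].iov_base = hdr;
        iov[0].iov_len  = hdr_len;        /* fixed-size header first */
        iov[1].iov_base = payload;
        iov[1].iov_len  = payload_len;    /* variable-size payload second */

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = iov;
        msg.msg_iovlen = 2;
        return sendmsg(fd, &msg, 0);
    }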
/linux-4.1.27/net/ipv4/
Dtcp_ipv4.c625 arg.iov[0].iov_len = sizeof(rep.th); in tcp_v4_send_reset()
666 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; in tcp_v4_send_reset()
667 rep.th.doff = arg.iov[0].iov_len / 4; in tcp_v4_send_reset()
676 arg.iov[0].iov_len, IPPROTO_TCP, 0); in tcp_v4_send_reset()
690 &arg, arg.iov[0].iov_len); in tcp_v4_send_reset()
729 arg.iov[0].iov_len = sizeof(rep.th); in tcp_v4_send_ack()
736 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_v4_send_ack()
742 rep.th.doff = arg.iov[0].iov_len / 4; in tcp_v4_send_ack()
756 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; in tcp_v4_send_ack()
757 rep.th.doff = arg.iov[0].iov_len/4; in tcp_v4_send_ack()
[all …]
/linux-4.1.27/include/scsi/
Dsg.h39 size_t iov_len; /* Length in bytes */ member
/linux-4.1.27/tools/virtio/
Dvringh_test.c523 assert(riov.iov[0].iov_len == 1); in main()
527 assert(wiov.iov[0].iov_len == 2); in main()
531 assert(wiov.iov[0].iov_len == 1); in main()
533 assert(wiov.iov[1].iov_len == 1); in main()
/linux-4.1.27/fs/ceph/
Dmds_client.c300 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); in parse_reply_info()
1466 (int)msg->front.iov_len); in ceph_add_cap_releases()
1469 msg->front.iov_len = sizeof(*head); in ceph_add_cap_releases()
1569 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in ceph_send_cap_releases()
1595 msg->front.iov_len = sizeof(*head); in discard_cap_releases()
1611 msg->front.iov_len = sizeof(*head); in discard_cap_releases()
1930 end = msg->front.iov_base + msg->front.iov_len; in create_request_message()
1976 msg->front.iov_len = p - msg->front.iov_base; in create_request_message()
1977 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in create_request_message()
2068 msg->front.iov_len = p - msg->front.iov_base; in __prepare_send_request()
[all …]
Dxattr.c579 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); in __build_xattrs()
588 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) { in __build_xattrs()
590 end = p + ci->i_xattrs.blob->vec.iov_len; in __build_xattrs()
705 ci->i_xattrs.prealloc_blob->vec.iov_len = in __ceph_build_xattrs_blob()
Dcaps.c1000 xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0); in send_cap_msg()
1048 fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len); in send_cap_msg()
1049 msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len); in send_cap_msg()
1071 BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); in __queue_cap_release()
1074 item = msg->front.iov_base + msg->front.iov_len; in __queue_cap_release()
1082 msg->front.iov_len += sizeof(*item); in __queue_cap_release()
1090 (int)msg->front.iov_len); in __queue_cap_release()
3112 end = msg->front.iov_base + msg->front.iov_len; in ceph_handle_caps()
3114 if (msg->front.iov_len < sizeof(*h)) in ceph_handle_caps()
Dsnap.c816 void *e = p + msg->front.iov_len; in ceph_handle_snap()
824 if (msg->front.iov_len < sizeof(*h)) in ceph_handle_snap()
/linux-4.1.27/fs/dlm/
Dlowcomms.c825 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb); in receive_from_sock()
827 iov[1].iov_len = 0; in receive_from_sock()
835 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); in receive_from_sock()
836 iov[1].iov_len = con->cb.base; in receive_from_sock()
840 len = iov[0].iov_len + iov[1].iov_len; in receive_from_sock()
1113 iov[0].iov_len = len; in sctp_init_assoc()
/linux-4.1.27/arch/cris/arch-v32/drivers/
Dcryptocop.c549 …n->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inj… in create_input_descriptors()
550 out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len; in create_input_descriptors()
569 rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset; in create_input_descriptors()
576 …outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operati… in create_input_descriptors()
584 if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) { in create_input_descriptors()
615 …int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offs… in create_output_descriptors()
634 if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) { in create_output_descriptors()
2465 iov[*iovix].iov_len = tmplen; in map_pages_to_iovec()
2779 …cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_M… in cryptocop_ioctl_process()
2781 tmplen -= cop->tfrm_op.indata[0].iov_len; in cryptocop_ioctl_process()
[all …]
/linux-4.1.27/fs/afs/
Drxrpc.c304 iov->iov_len = to - offset; in afs_send_pages()
383 iov[0].iov_len = call->request_size; in afs_make_call()
811 iov[0].iov_len = len; in afs_send_simple_reply()
/linux-4.1.27/arch/um/os-Linux/
Dfile.c490 .iov_len = sizeof(*helper_pid_out) }); in os_rcv_fd()
500 else if (n != iov.iov_len) in os_rcv_fd()
/linux-4.1.27/block/
Dblk-map.c78 if (!iov.iov_len) in blk_rq_map_user_iov()
Dbio.c1171 end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1) in bio_copy_user_iov()
1296 unsigned long len = iov.iov_len; in bio_map_user_iov()
1328 unsigned long len = iov.iov_len; in bio_map_user_iov()
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/
Dlinux-tcpip.c267 .iov_len = nob in libcfs_sock_write()
328 .iov_len = nob in libcfs_sock_read()
/linux-4.1.27/tools/lguest/
Dlguest.c272 if (iov[i].iov_len) in iov_empty()
287 used = iov[i].iov_len < len ? iov[i].iov_len : len; in iov_consume()
293 iov[i].iov_len -= used; in iov_consume()
882 iov[*out_num + *in_num].iov_len = desc[i].len; in wait_for_vq_desc()
2956 if (iov[i].iov_len > 0) { in blk_request()
2957 in = iov[i].iov_base + iov[i].iov_len - 1; in blk_request()
2958 iov[i].iov_len--; in blk_request()
/linux-4.1.27/fs/ocfs2/dlm/
Ddlmconvert.c385 vec[0].iov_len = sizeof(struct dlm_convert_lock); in dlm_send_remote_convert_request()
390 vec[1].iov_len = DLM_LVB_LEN; in dlm_send_remote_convert_request()
Ddlmast.c472 vec[0].iov_len = sizeof(struct dlm_proxy_ast); in dlm_send_proxy_ast_msg()
476 vec[1].iov_len = DLM_LVB_LEN; in dlm_send_proxy_ast_msg()
Ddlmunlock.c342 vec[0].iov_len = sizeof(struct dlm_unlock_lock); in dlm_send_remote_unlock_request()
347 vec[1].iov_len = DLM_LVB_LEN; in dlm_send_remote_unlock_request()
/linux-4.1.27/arch/um/drivers/
Dmconsole_user.c46 iov.iov_len = strlen(reply); in mconsole_reply_v0()
/linux-4.1.27/drivers/media/dvb-core/
Ddvb_net.c75 c = crc32_be( c, iov[j].iov_base, iov[j].iov_len ); in iov_crc32()
602 hexdump( iov[0].iov_base, iov[0].iov_len ); in dvb_net_ule()
603 hexdump( iov[1].iov_base, iov[1].iov_len ); in dvb_net_ule()
604 hexdump( iov[2].iov_base, iov[2].iov_len ); in dvb_net_ule()
/linux-4.1.27/fs/fuse/
Dfile.c2279 size_t iov_len = ii.iov->iov_len - ii.iov_offset; in fuse_ioctl_copy_user() local
2280 size_t copy = min(todo, iov_len); in fuse_ioctl_copy_user()
2327 dst[i].iov_len = ciov[i].iov_len; in fuse_copy_ioctl_iovec_old()
2347 if (iov->iov_len > (size_t) max) in fuse_verify_ioctl_iov()
2349 max -= iov->iov_len; in fuse_verify_ioctl_iov()
2376 dst[i].iov_len = (size_t) fiov[i].len; in fuse_copy_ioctl_iovec()
2381 (compat_size_t) dst[i].iov_len != fiov[i].len)) in fuse_copy_ioctl_iovec()
2480 iov->iov_len = _IOC_SIZE(cmd); in fuse_do_ioctl()
/linux-4.1.27/drivers/atm/
Dnicstar.c2100 iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; in dequeue_rx()
2101 iovb->len += iov->iov_len; in dequeue_rx()
2136 (skb->data + iov->iov_len - 6); in dequeue_rx()
2281 iov->iov_len); in dequeue_rx()
2282 skb_put(hb, iov->iov_len); in dequeue_rx()
2283 remaining = len - iov->iov_len; in dequeue_rx()
2292 min_t(int, remaining, iov->iov_len); in dequeue_rx()
Dhorizon.c989 rx_bytes = dev->rx_iovec->iov_len; in rx_schedule()
1150 tx_bytes = dev->tx_iovec->iov_len; in tx_schedule()
Dzatm.c689 *put++ = ((struct iovec *) skb->data)[i].iov_len; in do_tx()
/linux-4.1.27/Documentation/networking/timestamping/
Dtimestamping.c295 entry.iov_len = sizeof(data); in recvpacket()
Dtxtimestamp.c248 entry.iov_len = cfg_payload_len; in recv_errmsg()
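The timestamping selftests above point a single iovec at the payload buffer before calling recvmsg(); their control-message handling for timestamps is separate. A userspace sketch of just that receive setup (buffer and fd come from the caller):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static ssize_t recv_one(int fd, void *buf, size_t len)
    {
        struct iovec entry = { .iov_base = buf, .iov_len = len };
        struct msghdr msg;

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &entry;       /* one segment: the payload buffer */
        msg.msg_iovlen = 1;
        return recvmsg(fd, &msg, 0);
    }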
/linux-4.1.27/fs/ocfs2/cluster/
Dtcp.c919 struct kvec vec = { .iov_len = len, .iov_base = data, }; in o2net_recv_tcp_msg()
1096 vec[0].iov_len = sizeof(struct o2net_msg); in o2net_send_message_vec()
1152 .iov_len = len, in o2net_send_message()
1164 .iov_len = sizeof(struct o2net_msg), in o2net_send_status_magic()
/linux-4.1.27/drivers/block/
Dskd_main.c1436 sksgio->iov[0].iov_len = sgp->dxfer_len; in skd_sg_io_get_and_check_args()
1465 if (iov_data_len + iov[i].iov_len < iov_data_len) in skd_sg_io_get_and_check_args()
1467 iov_data_len += iov[i].iov_len; in skd_sg_io_get_and_check_args()
1483 if (!access_ok(acc, iov->iov_base, iov->iov_len)) { in skd_sg_io_get_and_check_args()
1486 iov->iov_base, (int)iov->iov_len); in skd_sg_io_get_and_check_args()
1652 curiov.iov_len = 0; in skd_sg_io_copy_buffer()
1664 if (curiov.iov_len == 0) { in skd_sg_io_copy_buffer()
1677 nbytes = min_t(u32, nbytes, curiov.iov_len); in skd_sg_io_copy_buffer()
1689 curiov.iov_len -= nbytes; in skd_sg_io_copy_buffer()
Dnvme-scsi.c388 xfer_len = min(remaining, sgl.iov_len); in nvme_trans_copy_to_user()
429 xfer_len = min(remaining, sgl.iov_len); in nvme_trans_copy_from_user()
2104 unit_len = sgl.iov_len; in nvme_trans_do_nvme_io()
2210 sum_iov_len += sgl.iov_len; in nvme_trans_io()
2212 if (sgl.iov_len % (1 << ns->lba_shift) != 0) { in nvme_trans_io()
Dnbd.c167 iov.iov_len = size; in sock_xmit()
/linux-4.1.27/drivers/mtd/chips/
Dcfi_cmdset_0001.c1762 if (n > vec->iov_len - vec_seek) in do_write_buffer()
1763 n = vec->iov_len - vec_seek; in do_write_buffer()
1783 if (vec_seek == vec->iov_len) { in do_write_buffer()
1849 len += vecs[i].iov_len; in cfi_intelext_writev()
1895 vec.iov_len = len; in cfi_intelext_write_buffers()
Dcfi_cmdset_0020.c682 size_t elem_len = vecs[i].iov_len; in cfi_staa_writev()
/linux-4.1.27/drivers/target/
Dtarget_core_user.c375 iov->iov_len = copy_bytes; in tcmu_queue_cmd_ring()
388 iov->iov_len = copy_bytes; in tcmu_queue_cmd_ring()
/linux-4.1.27/drivers/isdn/mISDN/
Dl1oip_core.c349 hc->sendiov.iov_len = len; in l1oip_socket_send()
728 .iov_len = recvbuf_size, in l1oip_socket_thread()
/linux-4.1.27/drivers/scsi/megaraid/
Dmegaraid_sas_base.c6102 if (!ioc->sgl[i].iov_len) in megasas_mgmt_fw_ioctl()
6106 ioc->sgl[i].iov_len, in megasas_mgmt_fw_ioctl()
6120 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); in megasas_mgmt_fw_ioctl()
6127 (u32) (ioc->sgl[i].iov_len))) { in megasas_mgmt_fw_ioctl()
6164 ioc->sgl[i].iov_len)) { in megasas_mgmt_fw_ioctl()
6434 copy_in_user(&ioc->sgl[i].iov_len, in megasas_mgmt_compat_ioctl_fw()
6435 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) in megasas_mgmt_compat_ioctl_fw()
/linux-4.1.27/net/bluetooth/rfcomm/
Dcore.c1119 iv[0].iov_len = 5; in rfcomm_send_test()
1121 iv[1].iov_len = len; in rfcomm_send_test()
1123 iv[2].iov_len = 1; in rfcomm_send_test()
/linux-4.1.27/drivers/net/caif/
Dcaif_virtio.c289 riov->iov[riov->i].iov_len); in cfv_rx_poll()
/linux-4.1.27/tools/testing/selftests/net/
Dpsock_tpacket.c680 ring->rd[i].iov_len = ring->flen; in mmap_ring()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd_cb.c713 while (offset >= iov->iov_len) { in kiblnd_setup_rd_iov()
714 offset -= iov->iov_len; in kiblnd_setup_rd_iov()
732 fragnob = min((int)(iov->iov_len - offset), nob); in kiblnd_setup_rd_iov()
738 if (offset + fragnob < iov->iov_len) { in kiblnd_setup_rd_iov()
/linux-4.1.27/net/netfilter/ipvs/
Dip_vs_sync.c1571 iov.iov_len = length; in ip_vs_send_async()
1605 iov.iov_len = (size_t)buflen; in ip_vs_receive()
/linux-4.1.27/sound/core/
Dpcm_native.c3104 if (!frame_aligned(runtime, to->iov->iov_len)) in snd_pcm_readv()
3106 frames = bytes_to_samples(runtime, to->iov->iov_len); in snd_pcm_readv()
3139 !frame_aligned(runtime, from->iov->iov_len)) in snd_pcm_writev()
3141 frames = bytes_to_samples(runtime, from->iov->iov_len); in snd_pcm_writev()
/linux-4.1.27/drivers/scsi/
Discsi_tcp.c294 .iov_len = copy in iscsi_sw_tcp_xmit_segment()
/linux-4.1.27/net/bluetooth/
Da2mp.c61 iv.iov_len = total_len; in a2mp_send()
D6lowpan.c536 iv.iov_len = skb->len; in send_pkt()
Dsmp.c597 iv[0].iov_len = 1; in smp_send_cmd()
600 iv[1].iov_len = len; in smp_send_cmd()
/linux-4.1.27/fs/nfs/
Dcallback_xdr.c908 p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); in nfs4_callback_compound()
Dnfs4xdr.c5219 pg_offset = xdr->buf->head[0].iov_len; in decode_getacl()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Dvvp_io.c233 count = iov.iov_len; in vvp_mmap_locks()
/linux-4.1.27/arch/alpha/kernel/
Dosf_sys.c1370 int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; in osf_fix_iov_len()
/linux-4.1.27/drivers/block/drbd/
Ddrbd_main.c1781 iov.iov_len = size; in drbd_send()
1820 iov.iov_len -= rv; in drbd_send()
Ddrbd_receiver.c484 .iov_len = size, in drbd_recv_short()
/linux-4.1.27/net/9p/
Dclient.c2055 struct kvec kv = {.iov_base = data, .iov_len = count}; in p9_client_readdir()
/linux-4.1.27/net/core/
Dsock.c2200 iov.iov_len = size; in sock_no_sendpage()
/linux-4.1.27/Documentation/networking/
Dpacket_mmap.txt852 ring->rd[i].iov_len = ring->req.tp_block_size;
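The packet_mmap.txt example above fills one iovec per ring block so each descriptor's iov_base/iov_len covers exactly one block of the mmap'd ring. A sketch of that fill loop (the mapping, block count, and block size come from the caller's ring setup; names are illustrative):

    #include <stddef.h>
    #include <sys/uio.h>

    static void fill_ring_rd(struct iovec *rd, char *map,
                             unsigned int block_nr, unsigned int block_size)
    {
        unsigned int i;

        for (i = 0; i < block_nr; i++) {
            rd[i].iov_base = map + (size_t)i * block_size;
            rd[i].iov_len  = block_size;    /* one descriptor per block */
        }
    }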
/linux-4.1.27/drivers/net/ppp/
Dppp_generic.c465 iov.iov_len = count; in ppp_read()