Lines matching refs:wr — each entry shows the source line number, the matched line, and the enclosing function; entries ending in "argument" are the declarations of wr as a function parameter.
2034 struct ib_send_wr *wr, in build_sriov_qp0_header() argument
2041 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); in build_sriov_qp0_header()
2049 if (wr->opcode != IB_WR_SEND) in build_sriov_qp0_header()
2054 for (i = 0; i < wr->num_sge; ++i) in build_sriov_qp0_header()
2055 send_size += wr->sg_list[i].length; in build_sriov_qp0_header()
2080 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2084 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_sriov_qp0_header()
2156 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, in build_mlx_header() argument
2163 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); in build_mlx_header()
2177 for (i = 0; i < wr->num_sge; ++i) in build_mlx_header()
2178 send_size += wr->sg_list[i].length; in build_mlx_header()
2255 switch (wr->opcode) { in build_mlx_header()
2263 sqp->ud_header.immediate_data = wr->ex.imm_data; in build_mlx_header()
2306 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_mlx_header()
2310 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
2312 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_mlx_header()
2314 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? in build_mlx_header()
2315 sqp->qkey : wr->wr.ud.remote_qkey); in build_mlx_header()
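
The Q_Key selection in build_mlx_header() above follows the usual GSI convention: when the high-order bit of the WR's remote Q_Key is set, the QP's own Q_Key is used instead. A hedged caller-side sketch of a send on QP1 that relies on this (sge, ah and pkey_index are assumed to be set up elsewhere; only IB_QP1_QKEY and the ib_send_wr fields come from the real API):

	struct ib_send_wr mad_wr = {
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,			/* assumed: DMA-mapped MAD buffer */
		.num_sge    = 1,
		.wr.ud = {
			.ah          = ah,		/* assumed: address handle for the peer */
			.remote_qpn  = 1,		/* GSI QP */
			.remote_qkey = IB_QP1_QKEY,	/* high bit set: the QP's own qkey is used */
			.pkey_index  = pkey_index,
		},
	};
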
2403 static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) in set_fmr_seg() argument
2405 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); in set_fmr_seg()
2408 for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) in set_fmr_seg()
2410 cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | in set_fmr_seg()
2413 fseg->flags = convert_access(wr->wr.fast_reg.access_flags); in set_fmr_seg()
2414 fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey); in set_fmr_seg()
2416 fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); in set_fmr_seg()
2417 fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length); in set_fmr_seg()
2419 fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift); in set_fmr_seg()
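
set_fmr_seg() translates a fast-register work request into the mlx4 WQE segment. A hedged sketch of the consumer-side WR it expects, using the pre-4.3 ib_send_wr layout (wr.fast_reg.*) seen above; frpl, mr, iova, npages and len are illustrative names, and the MR/page list are assumed to come from ib_alloc_fast_reg_mr()/ib_alloc_fast_reg_page_list():

	struct ib_send_wr fr_wr = {
		.opcode      = IB_WR_FAST_REG_MR,
		.send_flags  = IB_SEND_SIGNALED,
		.wr.fast_reg = {
			.iova_start    = iova,		/* start of the virtual range */
			.page_list     = frpl,		/* page DMA addresses to register */
			.page_list_len = npages,
			.page_shift    = PAGE_SHIFT,
			.length        = len,
			.rkey          = mr->rkey,
			.access_flags  = IB_ACCESS_LOCAL_WRITE |
					 IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_WRITE,
		},
	};
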
2424 static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr) in set_bind_seg() argument
2427 convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) & in set_bind_seg()
2432 if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2) in set_bind_seg()
2434 if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED) in set_bind_seg()
2436 bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey); in set_bind_seg()
2437 bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey); in set_bind_seg()
2438 bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr); in set_bind_seg()
2439 bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length); in set_bind_seg()
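
set_bind_seg() packs a memory-window bind. A sketch of the corresponding WR, assuming a type-2 MW from ib_alloc_mw() and a new rkey produced with ib_inc_rkey(); all field names appear in the lines above:

	struct ib_send_wr bind_wr = {
		.opcode     = IB_WR_BIND_MW,
		.send_flags = IB_SEND_SIGNALED,
		.wr.bind_mw = {
			.mw   = mw,
			.rkey = ib_inc_rkey(mw->rkey),	/* new rkey advertised to the peer */
			.bind_info = {
				.mr              = mr,	/* registered region backing the window */
				.addr            = addr,
				.length          = len,
				.mw_access_flags = IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE,
			},
		},
	};
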
2456 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) in set_atomic_seg() argument
2458 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
2459 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); in set_atomic_seg()
2460 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); in set_atomic_seg()
2461 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { in set_atomic_seg()
2462 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); in set_atomic_seg()
2463 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); in set_atomic_seg()
2465 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); in set_atomic_seg()
2472 struct ib_send_wr *wr) in set_masked_atomic_seg() argument
2474 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); in set_masked_atomic_seg()
2475 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); in set_masked_atomic_seg()
2476 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); in set_masked_atomic_seg()
2477 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); in set_masked_atomic_seg()
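
set_atomic_seg() and set_masked_atomic_seg() copy their operands straight out of wr->wr.atomic. A hedged consumer-side sketch of the compare-and-swap case (variable names are illustrative; the remote address must be an 8-byte-aligned, RDMA-registered quadword):

	struct ib_sge sge = {
		.addr   = result_dma_addr,	/* assumed: 8-byte buffer for the returned old value */
		.length = sizeof(u64),
		.lkey   = mr->lkey,
	};
	struct ib_send_wr wr = {
		.opcode     = IB_WR_ATOMIC_CMP_AND_SWP,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
		.wr.atomic = {
			.remote_addr = remote_addr,
			.rkey        = remote_rkey,
			.compare_add = expected_val,	/* compared with the remote quadword */
			.swap        = new_val,		/* stored only if the compare matches */
		},
	};

For the masked opcodes, compare_add_mask and swap_mask (read by set_masked_atomic_seg() above) restrict which bits take part in the compare and the swap.
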
2481 struct ib_send_wr *wr) in set_datagram_seg() argument
2483 memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); in set_datagram_seg()
2484 dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); in set_datagram_seg()
2485 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); in set_datagram_seg()
2486 dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; in set_datagram_seg()
2487 memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); in set_datagram_seg()
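
set_datagram_seg() copies the address vector, destination QPN and Q_Key from a UD send WR into the WQE. A minimal sketch of such a WR, assuming ah comes from ib_create_ah() and sge describes a DMA-mapped payload:

	struct ib_send_wr wr = {
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
		.wr.ud = {
			.ah          = ah,
			.remote_qpn  = dest_qpn,
			.remote_qkey = dest_qkey,
		},
	};
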
2492 struct ib_send_wr *wr, in set_tunnel_datagram_seg() argument
2495 union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; in set_tunnel_datagram_seg()
2514 static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) in build_tunnel_header() argument
2518 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); in build_tunnel_header()
2523 hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_tunnel_header()
2524 hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); in build_tunnel_header()
2525 hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); in build_tunnel_header()
2597 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, in build_lso_seg() argument
2601 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); in build_lso_seg()
2607 wr->num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
2610 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); in build_lso_seg()
2612 *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); in build_lso_seg()
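
build_lso_seg() copies a caller-built packet header into the WQE, records mss/hlen for segmentation, and rejects a WR whose inline header plus gather list would overflow a send WQE. A hedged sketch of an LSO send on a UD QP (hdr_buf, hdr_len, mss and the gather list are illustrative):

	struct ib_send_wr lso_wr = {
		.opcode     = IB_WR_LSO,
		.send_flags = IB_SEND_SIGNALED | IB_SEND_IP_CSUM,	/* checksum offload assumed */
		.sg_list    = sges,		/* gather list with the payload to segment */
		.num_sge    = nsge,
		.wr.ud = {
			.ah          = ah,
			.remote_qpn  = dest_qpn,
			.remote_qkey = dest_qkey,
			.header      = hdr_buf,	/* prebuilt L2/L3/L4 header, hdr_len bytes */
			.hlen        = hdr_len,
			.mss         = mss,	/* payload bytes carried per segment */
		},
	};
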
2617 static __be32 send_ieth(struct ib_send_wr *wr) in send_ieth() argument
2619 switch (wr->opcode) { in send_ieth()
2622 return wr->ex.imm_data; in send_ieth()
2625 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
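
send_ieth() selects the immediate-data dword of the control segment: imm_data is carried as-is (it is already big-endian in the WR), while an invalidate rkey is byte-swapped here. The caller-side counterpart, for illustration:

	wr.opcode      = IB_WR_SEND_WITH_IMM;
	wr.ex.imm_data = cpu_to_be32(0x1234);	/* delivered in the receiver's CQE */

	/* ...or, for send-with-invalidate: */
	wr.opcode             = IB_WR_SEND_WITH_INV;
	wr.ex.invalidate_rkey = rkey;		/* host order; send_ieth() converts it */
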
2639 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in mlx4_ib_post_send() argument
2663 *bad_wr = wr; in mlx4_ib_post_send()
2670 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_send()
2676 *bad_wr = wr; in mlx4_ib_post_send()
2680 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
2682 *bad_wr = wr; in mlx4_ib_post_send()
2687 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
2690 (wr->send_flags & IB_SEND_SIGNALED ? in mlx4_ib_post_send()
2692 (wr->send_flags & IB_SEND_SOLICITED ? in mlx4_ib_post_send()
2694 ((wr->send_flags & IB_SEND_IP_CSUM) ? in mlx4_ib_post_send()
2699 ctrl->imm = send_ieth(wr); in mlx4_ib_post_send()
2707 switch (wr->opcode) { in mlx4_ib_post_send()
2711 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, in mlx4_ib_post_send()
2712 wr->wr.atomic.rkey); in mlx4_ib_post_send()
2715 set_atomic_seg(wqe, wr); in mlx4_ib_post_send()
2724 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, in mlx4_ib_post_send()
2725 wr->wr.atomic.rkey); in mlx4_ib_post_send()
2728 set_masked_atomic_seg(wqe, wr); in mlx4_ib_post_send()
2739 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, in mlx4_ib_post_send()
2740 wr->wr.rdma.rkey); in mlx4_ib_post_send()
2748 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); in mlx4_ib_post_send()
2756 set_fmr_seg(wqe, wr); in mlx4_ib_post_send()
2764 set_bind_seg(wqe, wr); in mlx4_ib_post_send()
2775 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2777 *bad_wr = wr; in mlx4_ib_post_send()
2786 set_datagram_seg(wqe, wr); in mlx4_ib_post_send()
2793 set_datagram_seg(wqe, wr); in mlx4_ib_post_send()
2797 if (wr->opcode == IB_WR_LSO) { in mlx4_ib_post_send()
2798 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); in mlx4_ib_post_send()
2800 *bad_wr = wr; in mlx4_ib_post_send()
2810 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2812 *bad_wr = wr; in mlx4_ib_post_send()
2821 build_tunnel_header(wr, wqe, &seglen); in mlx4_ib_post_send()
2831 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, in mlx4_ib_post_send()
2835 build_tunnel_header(wr, wqe, &seglen); in mlx4_ib_post_send()
2842 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2844 *bad_wr = wr; in mlx4_ib_post_send()
2863 dseg += wr->num_sge - 1; in mlx4_ib_post_send()
2864 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); in mlx4_ib_post_send()
2875 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) in mlx4_ib_post_send()
2876 set_data_seg(dseg, wr->sg_list + i); in mlx4_ib_post_send()
2886 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? in mlx4_ib_post_send()
2896 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { in mlx4_ib_post_send()
2897 *bad_wr = wr; in mlx4_ib_post_send()
2902 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | in mlx4_ib_post_send()
2917 if (wr->next) { in mlx4_ib_post_send()
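
mlx4_ib_post_send() walks the WR chain, sizes and fills one WQE per WR, and on any failure sets *bad_wr and stops. From a consumer's point of view the contract looks like the following hedged sketch (everything before *bad_wr was posted; *bad_wr and the WRs chained after it were not):

	struct ib_send_wr *bad_wr = NULL;
	int err;

	err = ib_post_send(qp, &first_wr, &bad_wr);	/* first_wr may chain more WRs via ->next */
	if (err)
		pr_err("post_send failed (%d) at wr_id %llu\n",
		       err, (unsigned long long)bad_wr->wr_id);
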
2953 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, in mlx4_ib_post_recv() argument
2971 *bad_wr = wr; in mlx4_ib_post_recv()
2978 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_recv()
2981 *bad_wr = wr; in mlx4_ib_post_recv()
2985 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
2987 *bad_wr = wr; in mlx4_ib_post_recv()
3002 scat->lkey = cpu_to_be32(wr->sg_list->lkey); in mlx4_ib_post_recv()
3008 for (i = 0; i < wr->num_sge; ++i) in mlx4_ib_post_recv()
3009 __set_data_seg(scat + i, wr->sg_list + i); in mlx4_ib_post_recv()
3017 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
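
mlx4_ib_post_recv() has the same chained-WR and bad_wr contract on the receive side. A minimal sketch of refilling the receive queue, assuming rx_dma_addr/rx_len describe a DMA-mapped buffer and rx_ctx is a per-buffer cookie later recovered from the completion's wr_id:

	struct ib_sge sge = {
		.addr   = rx_dma_addr,
		.length = rx_len,
		.lkey   = mr->lkey,
	};
	struct ib_recv_wr rwr = {
		.wr_id   = (u64)(unsigned long)rx_ctx,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;
	int err = ib_post_recv(qp, &rwr, &bad_wr);

	if (err)
		pr_warn("post_recv failed: %d\n", err);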