Lines matching refs: wr (lines in the mlx4 InfiniBand driver's QP code, drivers/infiniband/hw/mlx4/qp.c, that reference the work-request argument wr; each entry shows the kernel source line number, the code, and the containing function)
2143 struct ib_ud_wr *wr, in build_sriov_qp0_header() argument
2150 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header()
2158 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header()
2163 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header()
2164 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header()
2189 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2193 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header()
2265 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, in build_mlx_header() argument
2272 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header()
2286 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header()
2287 send_size += wr->wr.sg_list[i].length; in build_mlx_header()
2367 switch (wr->wr.opcode) { in build_mlx_header()
2375 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
2418 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
2422 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); in build_mlx_header()
2424 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
2426 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
2427 sqp->qkey : wr->remote_qkey); in build_mlx_header()
2516 struct ib_reg_wr *wr) in set_reg_seg() argument
2518 struct mlx4_ib_mr *mr = to_mmr(wr->mr); in set_reg_seg()
2520 fseg->flags = convert_access(wr->access); in set_reg_seg()
2521 fseg->mem_key = cpu_to_be32(wr->key); in set_reg_seg()
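
Note (not part of the listing): set_reg_seg() above decodes an IB_WR_REG_MR request. A minimal caller-side sketch of such a request, assuming <rdma/ib_verbs.h> and an MR already prepared with ib_alloc_mr()/ib_map_mr_sg(); the helper name and parameters are hypothetical, not from the driver.

#include <rdma/ib_verbs.h>

/* Hypothetical illustration: post a fast-register WR for an already mapped MR. */
static int example_post_reg_mr(struct ib_qp *qp, struct ib_mr *mr, int access)
{
	struct ib_reg_wr reg = {
		.wr = {
			.opcode     = IB_WR_REG_MR,
			.send_flags = IB_SEND_SIGNALED,
		},
		.mr     = mr,        /* -> to_mmr(wr->mr) in set_reg_seg() */
		.key    = mr->rkey,  /* -> fseg->mem_key */
		.access = access,    /* -> convert_access(wr->access) */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &reg.wr, &bad_wr);
}
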
2532 struct ib_bind_mw_wr *wr) in set_bind_seg() argument
2535 convert_access(wr->bind_info.mw_access_flags) & in set_bind_seg()
2540 if (wr->mw->type == IB_MW_TYPE_2) in set_bind_seg()
2542 if (wr->bind_info.mw_access_flags & IB_ZERO_BASED) in set_bind_seg()
2544 bseg->new_rkey = cpu_to_be32(wr->rkey); in set_bind_seg()
2545 bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey); in set_bind_seg()
2546 bseg->addr = cpu_to_be64(wr->bind_info.addr); in set_bind_seg()
2547 bseg->length = cpu_to_be64(wr->bind_info.length); in set_bind_seg()
2565 struct ib_atomic_wr *wr) in set_atomic_seg() argument
2567 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
2568 aseg->swap_add = cpu_to_be64(wr->swap); in set_atomic_seg()
2569 aseg->compare = cpu_to_be64(wr->compare_add); in set_atomic_seg()
2570 } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { in set_atomic_seg()
2571 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
2572 aseg->compare = cpu_to_be64(wr->compare_add_mask); in set_atomic_seg()
2574 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
2581 struct ib_atomic_wr *wr) in set_masked_atomic_seg() argument
2583 aseg->swap_add = cpu_to_be64(wr->swap); in set_masked_atomic_seg()
2584 aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); in set_masked_atomic_seg()
2585 aseg->compare = cpu_to_be64(wr->compare_add); in set_masked_atomic_seg()
2586 aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); in set_masked_atomic_seg()
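
Note (not part of the listing): a minimal sketch of the atomic request that set_atomic_seg() decodes, under the same assumptions as the sketch above; the sge is assumed to describe an 8-byte local buffer that receives the prior remote value, and the helper name is hypothetical.

/* Hypothetical illustration: compare-and-swap a remote 64-bit word. */
static int example_post_cmp_swap(struct ib_qp *qp, struct ib_sge *sge,
				 u64 remote_addr, u32 rkey,
				 u64 expected, u64 new_val)
{
	struct ib_atomic_wr atomic = {
		.wr = {
			.opcode     = IB_WR_ATOMIC_CMP_AND_SWP,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,
			.num_sge    = 1,
		},
		.remote_addr = remote_addr,
		.rkey        = rkey,
		.compare_add = expected,  /* -> aseg->compare in set_atomic_seg() */
		.swap        = new_val,   /* -> aseg->swap_add */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &atomic.wr, &bad_wr);
}
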
2590 struct ib_ud_wr *wr) in set_datagram_seg() argument
2592 memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); in set_datagram_seg()
2593 dseg->dqpn = cpu_to_be32(wr->remote_qpn); in set_datagram_seg()
2594 dseg->qkey = cpu_to_be32(wr->remote_qkey); in set_datagram_seg()
2595 dseg->vlan = to_mah(wr->ah)->av.eth.vlan; in set_datagram_seg()
2596 memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); in set_datagram_seg()
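
Note (not part of the listing): a minimal sketch of the UD send that set_datagram_seg() (and, on QP0/QP1, the header builders above) consume; the address handle, remote QPN/Q_Key and DMA-mapped sge are assumed to come from the caller, and the helper name is hypothetical.

/* Hypothetical illustration: post one UD send toward the peer described by ah. */
static int example_post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
				struct ib_sge *sge, u32 remote_qpn,
				u32 remote_qkey)
{
	struct ib_ud_wr ud = {
		.wr = {
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,
			.num_sge    = 1,
		},
		.ah          = ah,           /* -> to_mah(wr->ah)->av above */
		.remote_qpn  = remote_qpn,   /* -> dseg->dqpn */
		.remote_qkey = remote_qkey,  /* -> dseg->qkey */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &ud.wr, &bad_wr);
}
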
2601 struct ib_ud_wr *wr, in set_tunnel_datagram_seg() argument
2604 union mlx4_ext_av *av = &to_mah(wr->ah)->av; in set_tunnel_datagram_seg()
2623 static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) in build_tunnel_header() argument
2627 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_tunnel_header()
2632 hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); in build_tunnel_header()
2633 hdr.pkey_index = cpu_to_be16(wr->pkey_index); in build_tunnel_header()
2634 hdr.qkey = cpu_to_be32(wr->remote_qkey); in build_tunnel_header()
2706 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, in build_lso_seg() argument
2710 unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); in build_lso_seg()
2716 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
2719 memcpy(wqe->header, wr->header, wr->hlen); in build_lso_seg()
2721 *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); in build_lso_seg()
2726 static __be32 send_ieth(struct ib_send_wr *wr) in send_ieth() argument
2728 switch (wr->opcode) { in send_ieth()
2731 return wr->ex.imm_data; in send_ieth()
2734 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
2748 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in mlx4_ib_post_send() argument
2772 *bad_wr = wr; in mlx4_ib_post_send()
2779 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_send()
2785 *bad_wr = wr; in mlx4_ib_post_send()
2789 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
2791 *bad_wr = wr; in mlx4_ib_post_send()
2796 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
2799 (wr->send_flags & IB_SEND_SIGNALED ? in mlx4_ib_post_send()
2801 (wr->send_flags & IB_SEND_SOLICITED ? in mlx4_ib_post_send()
2803 ((wr->send_flags & IB_SEND_IP_CSUM) ? in mlx4_ib_post_send()
2808 ctrl->imm = send_ieth(wr); in mlx4_ib_post_send()
2816 switch (wr->opcode) { in mlx4_ib_post_send()
2820 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mlx4_ib_post_send()
2821 atomic_wr(wr)->rkey); in mlx4_ib_post_send()
2824 set_atomic_seg(wqe, atomic_wr(wr)); in mlx4_ib_post_send()
2833 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mlx4_ib_post_send()
2834 atomic_wr(wr)->rkey); in mlx4_ib_post_send()
2837 set_masked_atomic_seg(wqe, atomic_wr(wr)); in mlx4_ib_post_send()
2848 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mlx4_ib_post_send()
2849 rdma_wr(wr)->rkey); in mlx4_ib_post_send()
2857 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); in mlx4_ib_post_send()
2865 set_reg_seg(wqe, reg_wr(wr)); in mlx4_ib_post_send()
2873 set_bind_seg(wqe, bind_mw_wr(wr)); in mlx4_ib_post_send()
2884 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), in mlx4_ib_post_send()
2887 *bad_wr = wr; in mlx4_ib_post_send()
2896 set_datagram_seg(wqe, ud_wr(wr)); in mlx4_ib_post_send()
2903 set_datagram_seg(wqe, ud_wr(wr)); in mlx4_ib_post_send()
2907 if (wr->opcode == IB_WR_LSO) { in mlx4_ib_post_send()
2908 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in mlx4_ib_post_send()
2911 *bad_wr = wr; in mlx4_ib_post_send()
2921 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), in mlx4_ib_post_send()
2924 *bad_wr = wr; in mlx4_ib_post_send()
2933 build_tunnel_header(ud_wr(wr), wqe, &seglen); in mlx4_ib_post_send()
2944 ud_wr(wr), in mlx4_ib_post_send()
2948 build_tunnel_header(ud_wr(wr), wqe, &seglen); in mlx4_ib_post_send()
2955 err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl, in mlx4_ib_post_send()
2958 *bad_wr = wr; in mlx4_ib_post_send()
2977 dseg += wr->num_sge - 1; in mlx4_ib_post_send()
2978 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); in mlx4_ib_post_send()
2989 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) in mlx4_ib_post_send()
2990 set_data_seg(dseg, wr->sg_list + i); in mlx4_ib_post_send()
3000 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? in mlx4_ib_post_send()
3010 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { in mlx4_ib_post_send()
3011 *bad_wr = wr; in mlx4_ib_post_send()
3016 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | in mlx4_ib_post_send()
3031 if (wr->next) { in mlx4_ib_post_send()
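
Note (not part of the listing): mlx4_ib_post_send() above dispatches on wr->opcode and converts the generic request with ud_wr()/atomic_wr()/rdma_wr()/reg_wr(). A minimal sketch of an RDMA WRITE that reaches the rdma_wr(wr)/set_raddr_seg() branch, under the same assumptions as the earlier sketches; the helper name is hypothetical.

/* Hypothetical illustration: write the local sge into a remote buffer. */
static int example_post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
				   u64 remote_addr, u32 rkey)
{
	struct ib_rdma_wr rdma = {
		.wr = {
			.opcode     = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,
			.num_sge    = 1,
		},
		.remote_addr = remote_addr,  /* -> set_raddr_seg() */
		.rkey        = rkey,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &rdma.wr, &bad_wr);
}
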
3067 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, in mlx4_ib_post_recv() argument
3085 *bad_wr = wr; in mlx4_ib_post_recv()
3092 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_recv()
3095 *bad_wr = wr; in mlx4_ib_post_recv()
3099 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
3101 *bad_wr = wr; in mlx4_ib_post_recv()
3116 scat->lkey = cpu_to_be32(wr->sg_list->lkey); in mlx4_ib_post_recv()
3122 for (i = 0; i < wr->num_sge; ++i) in mlx4_ib_post_recv()
3123 __set_data_seg(scat + i, wr->sg_list + i); in mlx4_ib_post_recv()
3131 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
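
Note (not part of the listing): the receive side is simpler; mlx4_ib_post_recv() only walks wr->sg_list and records wr->wr_id. A minimal sketch of one posted receive, same assumptions as above and with a hypothetical helper name.

/* Hypothetical illustration: post one receive buffer. */
static int example_post_recv(struct ib_qp *qp, struct ib_sge *sge, u64 wr_id)
{
	struct ib_recv_wr rwr = {
		.wr_id   = wr_id,   /* reported back in the completion */
		.sg_list = sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &rwr, &bad_wr);
}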