Lines matching refs:wr (references to 'wr' in drivers/infiniband/hw/mlx5/qp.c; each line shows the kernel source line, the matching code, and the enclosing function)
1839 struct ib_send_wr *wr) in set_datagram_seg() argument
1841 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
1842 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
1843 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
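A note on how these references are reached from the consumer side: set_datagram_seg() copies the address vector, remote QPN and QKey out of a struct ib_ud_wr that a ULP posted. A minimal, hedged sketch of such a caller, assuming the UD QP, address handle and registered buffer (addr/lkey) already exist; the helper name and placeholder values are invented for illustration:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one UD send; qp/ah/addr/len/lkey are placeholders. */
static int post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
			u32 remote_qpn, u32 remote_qkey,
			u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = addr,		/* DMA address of a registered buffer */
		.length = len,
		.lkey   = lkey,
	};
	struct ib_ud_wr wr = {
		.wr = {
			.wr_id      = 1,
			.sg_list    = &sge,
			.num_sge    = 1,
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
		},
		.ah          = ah,		/* consumed by set_datagram_seg() */
		.remote_qpn  = remote_qpn,
		.remote_qkey = remote_qkey,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}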
1955 struct ib_send_wr *wr) in set_reg_umr_segment() argument
1957 struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
1961 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment()
1966 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { in set_reg_umr_segment()
1968 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { in set_reg_umr_segment()
1979 if (!wr->num_sge) in set_reg_umr_segment()
2014 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) in set_reg_mkey_segment() argument
2016 struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_mkey_segment()
2019 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { in set_reg_mkey_segment()
2025 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { in set_reg_mkey_segment()
2046 static __be32 send_ieth(struct ib_send_wr *wr) in send_ieth() argument
2048 switch (wr->opcode) { in send_ieth()
2051 return wr->ex.imm_data; in send_ieth()
2054 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
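send_ieth() picks the immediate/invalidate dword from the ex union by opcode. A small hedged sketch of the consumer-side fields it reads; note the byte-order asymmetry visible above (imm_data is already __be32 and returned unchanged, invalidate_rkey is CPU order and byte-swapped):

#include <rdma/ib_verbs.h>
#include <asm/byteorder.h>

/* Hedged sketch: how a consumer fills the 'ex' union that send_ieth() reads. */
static void fill_ieth_examples(struct ib_send_wr *imm_wr, struct ib_send_wr *inv_wr)
{
	/* Immediate data is carried as __be32 and returned as-is. */
	imm_wr->opcode      = IB_WR_SEND_WITH_IMM;
	imm_wr->ex.imm_data = cpu_to_be32(0xcafe);

	/* The rkey to invalidate is CPU order; send_ieth() converts it. */
	inv_wr->opcode             = IB_WR_SEND_WITH_INV;
	inv_wr->ex.invalidate_rkey = 0x1234;
}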
2078 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, in set_data_inl_seg() argument
2091 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
2092 addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
2093 len = wr->sg_list[i].length; in set_data_inl_seg()
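set_data_inl_seg() is the IB_SEND_INLINE path: sg_list[i].addr is dereferenced as a CPU pointer and the payload bytes are copied into the WQE, so no lkey is needed. A hedged caller sketch, assuming the payload fits the QP's inline limit; names are placeholders:

#include <rdma/ib_verbs.h>

/* Hedged sketch: an inline send; 'buf' is an ordinary kernel buffer, no lkey,
 * because set_data_inl_seg() copies the bytes straight into the WQE.
 * qp and buf are placeholders; len must fit the QP's max_inline_data. */
static int post_inline_send(struct ib_qp *qp, void *buf, u32 len)
{
	struct ib_sge sge = {
		.addr   = (unsigned long)buf,	/* virtual address, copied by the driver */
		.length = len,
	};
	struct ib_send_wr wr = {
		.wr_id      = 2,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}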
2224 static int set_sig_data_segment(struct ib_sig_handover_wr *wr, in set_sig_data_segment() argument
2227 struct ib_sig_attrs *sig_attrs = wr->sig_attrs; in set_sig_data_segment()
2228 struct ib_mr *sig_mr = wr->sig_mr; in set_sig_data_segment()
2230 u32 data_len = wr->wr.sg_list->length; in set_sig_data_segment()
2231 u32 data_key = wr->wr.sg_list->lkey; in set_sig_data_segment()
2232 u64 data_va = wr->wr.sg_list->addr; in set_sig_data_segment()
2236 if (!wr->prot || in set_sig_data_segment()
2237 (data_key == wr->prot->lkey && in set_sig_data_segment()
2238 data_va == wr->prot->addr && in set_sig_data_segment()
2239 data_len == wr->prot->length)) { in set_sig_data_segment()
2273 u32 prot_key = wr->prot->lkey; in set_sig_data_segment()
2274 u64 prot_va = wr->prot->addr; in set_sig_data_segment()
2326 struct ib_sig_handover_wr *wr, u32 nelements, in set_sig_mkey_segment() argument
2329 struct ib_mr *sig_mr = wr->sig_mr; in set_sig_mkey_segment()
2335 seg->flags = get_umr_flags(wr->access_flags) | in set_sig_mkey_segment()
2360 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); in set_sig_umr_wr() local
2361 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); in set_sig_umr_wr()
2366 if (unlikely(wr->wr.num_sge != 1) || in set_sig_umr_wr()
2367 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) || in set_sig_umr_wr()
2373 region_len = wr->wr.sg_list->length; in set_sig_umr_wr()
2374 if (wr->prot && in set_sig_umr_wr()
2375 (wr->prot->lkey != wr->wr.sg_list->lkey || in set_sig_umr_wr()
2376 wr->prot->addr != wr->wr.sg_list->addr || in set_sig_umr_wr()
2377 wr->prot->length != wr->wr.sg_list->length)) in set_sig_umr_wr()
2378 region_len += wr->prot->length; in set_sig_umr_wr()
2385 klm_oct_size = wr->prot ? 3 : 1; in set_sig_umr_wr()
2393 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); in set_sig_umr_wr()
2399 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
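set_sig_umr_wr() and its helpers above consume an IB_WR_REG_SIG_MR work request. A hedged sketch of the posting side, assuming the signature MR (ib_alloc_mr() with IB_MR_TYPE_SIGNATURE) and the sig_attrs (e.g. T10-DIF parameters) are prepared elsewhere; all names here are placeholders:

#include <rdma/ib_verbs.h>

/* Hedged sketch of a signature handover WR; data_sge/prot_sge describe
 * already-registered buffers, everything here is a placeholder. */
static int post_sig_handover(struct ib_qp *qp, struct ib_mr *sig_mr,
			     struct ib_sig_attrs *sig_attrs,
			     struct ib_sge *data_sge, struct ib_sge *prot_sge)
{
	struct ib_sig_handover_wr wr = {
		.wr = {
			.wr_id      = 3,
			.sg_list    = data_sge,	/* exactly one data SGE, as checked above */
			.num_sge    = 1,
			.opcode     = IB_WR_REG_SIG_MR,
			.send_flags = IB_SEND_SIGNALED,
		},
		.sig_attrs    = sig_attrs,
		.sig_mr       = sig_mr,
		.access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE,
		.prot         = prot_sge,	/* NULL when no separate protection buffer */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}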
2434 struct ib_reg_wr *wr, in set_reg_wr() argument
2437 struct mlx5_ib_mr *mr = to_mmr(wr->mr); in set_reg_wr()
2440 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { in set_reg_wr()
2452 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
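set_reg_wr() handles IB_WR_REG_MR fast registration. A hedged sketch of a caller, assuming the MR was allocated with ib_alloc_mr() and mapped with ib_map_mr_sg() beforehand; names are placeholders:

#include <rdma/ib_verbs.h>

/* Hedged sketch: post a fast-registration WR for an already-mapped MR.
 * qp and mr are placeholders. */
static int post_fast_reg(struct ib_qp *qp, struct ib_mr *mr)
{
	struct ib_reg_wr wr = {
		.wr = {
			.wr_id      = 4,
			.opcode     = IB_WR_REG_MR,
			.send_flags = IB_SEND_SIGNALED,
		},
		.mr     = mr,
		.key    = mr->rkey,	/* passed to set_reg_mkey_seg() as wr->key above */
		.access = IB_ACCESS_LOCAL_WRITE |
			  IB_ACCESS_REMOTE_READ |
			  IB_ACCESS_REMOTE_WRITE,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}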
2517 static u8 get_fence(u8 fence, struct ib_send_wr *wr) in get_fence() argument
2519 if (unlikely(wr->opcode == IB_WR_LOCAL_INV && in get_fence()
2520 wr->send_flags & IB_SEND_FENCE)) in get_fence()
2524 if (wr->send_flags & IB_SEND_FENCE) in get_fence()
2536 struct ib_send_wr *wr, unsigned *idx, in begin_wqe() argument
2550 (*ctrl)->imm = send_ieth(wr); in begin_wqe()
2552 (wr->send_flags & IB_SEND_SIGNALED ? in begin_wqe()
2554 (wr->send_flags & IB_SEND_SOLICITED ? in begin_wqe()
2587 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in mlx5_ib_post_send() argument
2612 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
2613 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
2616 *bad_wr = wr; in mlx5_ib_post_send()
2621 num_sge = wr->num_sge; in mlx5_ib_post_send()
2625 *bad_wr = wr; in mlx5_ib_post_send()
2629 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); in mlx5_ib_post_send()
2633 *bad_wr = wr; in mlx5_ib_post_send()
2644 switch (wr->opcode) { in mlx5_ib_post_send()
2648 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
2649 rdma_wr(wr)->rkey); in mlx5_ib_post_send()
2659 *bad_wr = wr; in mlx5_ib_post_send()
2665 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); in mlx5_ib_post_send()
2673 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); in mlx5_ib_post_send()
2674 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); in mlx5_ib_post_send()
2676 *bad_wr = wr; in mlx5_ib_post_send()
2684 mr = to_mmr(sig_handover_wr(wr)->sig_mr); in mlx5_ib_post_send()
2687 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
2690 *bad_wr = wr; in mlx5_ib_post_send()
2694 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2695 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2701 wr->send_flags &= ~IB_SEND_SIGNALED; in mlx5_ib_post_send()
2702 wr->send_flags |= IB_SEND_SOLICITED; in mlx5_ib_post_send()
2703 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2708 *bad_wr = wr; in mlx5_ib_post_send()
2712 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem, in mlx5_ib_post_send()
2717 *bad_wr = wr; in mlx5_ib_post_send()
2721 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2722 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2724 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2729 *bad_wr = wr; in mlx5_ib_post_send()
2734 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, in mlx5_ib_post_send()
2739 *bad_wr = wr; in mlx5_ib_post_send()
2743 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2744 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2755 switch (wr->opcode) { in mlx5_ib_post_send()
2758 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
2759 rdma_wr(wr)->rkey); in mlx5_ib_post_send()
2772 set_datagram_seg(seg, wr); in mlx5_ib_post_send()
2780 if (wr->opcode != MLX5_IB_WR_UMR) { in mlx5_ib_post_send()
2786 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); in mlx5_ib_post_send()
2787 set_reg_umr_segment(seg, wr); in mlx5_ib_post_send()
2792 set_reg_mkey_segment(seg, wr); in mlx5_ib_post_send()
2803 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
2806 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
2809 *bad_wr = wr; in mlx5_ib_post_send()
2821 if (likely(wr->sg_list[i].length)) { in mlx5_ib_post_send()
2822 set_data_ptr_seg(dpseg, wr->sg_list + i); in mlx5_ib_post_send()
2829 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
2830 get_fence(fence, wr), next_fence, in mlx5_ib_post_send()
2831 mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
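The mlx5_ib_post_send() references above walk a chained WR list and dispatch on wr->opcode, viewing the same ib_send_wr through rdma_wr()/ud_wr()/reg_wr() as needed. A hedged sketch of the simplest consumer case, a signalled RDMA WRITE; handles and keys are placeholders:

#include <rdma/ib_verbs.h>

/* Hedged sketch: post one signalled RDMA WRITE.  On failure, bad_wr points
 * at the WR that could not be queued, mirroring the *bad_wr assignments in
 * mlx5_ib_post_send(). */
static int post_rdma_write(struct ib_qp *qp, u64 local_addr, u32 len, u32 lkey,
			   u64 remote_addr, u32 rkey)
{
	struct ib_sge sge = {
		.addr   = local_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_rdma_wr wr = {
		.wr = {
			.wr_id      = 5,
			.sg_list    = &sge,
			.num_sge    = 1,
			.opcode     = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
		},
		.remote_addr = remote_addr,	/* read back via rdma_wr(wr)->remote_addr */
		.rkey        = rkey,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}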
2886 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, in mlx5_ib_post_recv() argument
2902 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
2905 *bad_wr = wr; in mlx5_ib_post_recv()
2909 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
2911 *bad_wr = wr; in mlx5_ib_post_recv()
2919 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
2920 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
2933 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
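mlx5_ib_post_recv() validates num_sge against rq.max_gs and records each wr_id per receive slot. A hedged sketch of the matching consumer call; the buffer description is a placeholder:

#include <rdma/ib_verbs.h>

/* Hedged sketch: post one receive buffer; addr/len/lkey describe a registered
 * buffer and are placeholders.  num_sge must not exceed the QP's rq.max_gs,
 * which is exactly what mlx5_ib_post_recv() checks above. */
static int post_recv_buf(struct ib_qp *qp, u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = 6,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}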