Lines matching refs: wr
(Cross-reference hits for the identifier wr, apparently from drivers/infiniband/hw/mlx5/qp.c in a kernel that still uses the unified ib_send_wr unions. Each entry gives the source line number, the matching text, and the enclosing function; "argument" marks lines where wr appears as a function parameter.)

1851 struct ib_send_wr *wr) in set_datagram_seg() argument
1853 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
1854 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
1855 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); in set_datagram_seg()
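The three hits above are essentially the whole helper: it copies the UD address vector out of the address handle, then patches in the destination QP number and Q_Key. A sketch of the full function as it reads in this kernel era (the declaration line is assumed, since only its continuation matched):

    static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                                 struct ib_send_wr *wr)
    {
            /* Copy the whole mlx5 address vector from the address handle. */
            memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
            /* Destination QP number, flagged as an extended UD AV. */
            dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn |
                                           MLX5_EXTENDED_UD_AV);
            /* Q_Key the remote QP expects. */
            dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
    }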
1912 struct ib_send_wr *wr, int li) in set_frwr_umr_segment() argument
1923 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); in set_frwr_umr_segment()
1965 struct ib_send_wr *wr) in set_reg_umr_segment() argument
1967 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; in set_reg_umr_segment()
1971 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment()
1976 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { in set_reg_umr_segment()
1978 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { in set_reg_umr_segment()
1989 if (!wr->num_sge) in set_reg_umr_segment()
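Only the conditionals of set_reg_umr_segment() matched, but they outline how the UMR control segment is programmed from the mlx5-private send flags. A hedged skeleton; the flag constants and mask helpers (MLX5_UMR_CHECK_FREE, get_umr_reg_mr_mask() and friends) are assumed from kernels of this era, not shown in the hits:

    if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
            umr->flags = MLX5_UMR_CHECK_FREE;     /* fail if mkey is free */
    else
            umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if mkey is in use */

    if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
            /* (Re)registration: program the translation size. */
            umr->klm_octowords = get_klm_octo(umrwr->npages);
            if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
                    /* Partial MTT update at an offset into the mkey. */
                    umr->mkey_mask = get_umr_update_mtt_mask();
                    umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
                    umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
            } else {
                    umr->mkey_mask = get_umr_reg_mr_mask();
            }
    } else {
            /* Deregistration: only the free bit changes. */
            umr->mkey_mask = get_umr_unreg_mr_mask();
    }

    if (!wr->num_sge)
            /* No scatter entry: translation data rides inline in the WQE. */
            umr->flags |= MLX5_UMR_INLINE;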
2002 static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, in set_mkey_segment() argument
2011 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) | in set_mkey_segment()
2014 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); in set_mkey_segment()
2016 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); in set_mkey_segment()
2017 seg->len = cpu_to_be64(wr->wr.fast_reg.length); in set_mkey_segment()
2018 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); in set_mkey_segment()
2019 seg->log2_page_size = wr->wr.fast_reg.page_shift; in set_mkey_segment()
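In set_mkey_segment() the li flag separates local invalidate (just mark the mkey free) from fast registration, which fills the mkey from the fast_reg work request. The hits cover most of the fill; a sketch with the unmatched lines filled in as assumptions (the memset and the writ computation are not shown in the hits):

    memset(seg, 0, sizeof(*seg));
    if (li) {
            seg->status = MLX5_MKEY_STATUS_FREE;  /* invalidate: free the mkey */
            return;
    }

    seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
                 MLX5_ACCESS_MODE_MTT;
    *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
    /* Low byte of the rkey is the new key; upper bits are forced on. */
    seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
    seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
    seg->len = cpu_to_be64(wr->wr.fast_reg.length);
    /* Two 8-byte page entries per 16-byte octword, rounded up. */
    seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
    seg->log2_page_size = wr->wr.fast_reg.page_shift;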
2022 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) in set_reg_mkey_segment() argument
2024 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; in set_reg_mkey_segment()
2027 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { in set_reg_mkey_segment()
2033 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { in set_reg_mkey_segment()
2044 struct ib_send_wr *wr, in set_frwr_pages() argument
2049 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); in set_frwr_pages()
2050 u64 *page_list = wr->wr.fast_reg.page_list->page_list; in set_frwr_pages()
2054 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) in set_frwr_pages()
2057 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); in set_frwr_pages()
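set_frwr_pages() copies the caller's page list into the driver's DMA-mapped shadow list and points a data segment at it. A sketch of the body around the matched lines; the permission handling (MLX5_EN_RD/MLX5_EN_WR, driven by the writ flag from set_mkey_segment()) and the pd->pa_lkey data-segment key are assumptions for this era:

    u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
    int i;

    for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
            mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
    dseg->addr = cpu_to_be64(mfrpl->map);
    /* The device consumes translation entries in 64-byte units. */
    dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) *
                                         wr->wr.fast_reg.page_list_len, 64));
    dseg->lkey = cpu_to_be32(pd->pa_lkey);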
2061 static __be32 send_ieth(struct ib_send_wr *wr) in send_ieth() argument
2063 switch (wr->opcode) { in send_ieth()
2066 return wr->ex.imm_data; in send_ieth()
2069 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
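send_ieth() picks what lands in the control segment's immediate field. The case labels did not themselves match wr, so they are assumed, but this is the standard mapping for these return statements:

    static __be32 send_ieth(struct ib_send_wr *wr)
    {
            switch (wr->opcode) {
            case IB_WR_SEND_WITH_IMM:
            case IB_WR_RDMA_WRITE_WITH_IMM:
                    /* imm_data is already in network byte order */
                    return wr->ex.imm_data;
            case IB_WR_SEND_WITH_INV:
                    return cpu_to_be32(wr->ex.invalidate_rkey);
            default:
                    return 0;
            }
    }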
2093 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, in set_data_inl_seg() argument
2106 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
2107 addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
2108 len = wr->sg_list[i].length; in set_data_inl_seg()
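set_data_inl_seg() copies each scatter entry by CPU into the WQE itself rather than posting pointers. A hedged sketch of the copy loop around the two matched lines, including the inline-limit check and the wrap back to the start of the send queue that the real function has to handle (qend, mlx5_get_send_wqe() and MLX5_INLINE_SEG follow the file's conventions):

    for (i = 0; i < wr->num_sge; i++) {
            addr = (void *)(unsigned long)(wr->sg_list[i].addr);
            len  = wr->sg_list[i].length;
            inl += len;

            if (unlikely(inl > qp->max_inline_data))
                    return -ENOMEM;

            if (unlikely(wqe + len > qend)) {
                    /* Split the copy at the end of the queue buffer. */
                    copy = qend - wqe;
                    memcpy(wqe, addr, copy);
                    addr += copy;
                    len -= copy;
                    wqe = mlx5_get_send_wqe(qp, 0);
            }
            memcpy(wqe, addr, len);
            wqe += len;
    }

    seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);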
2239 static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, in set_sig_data_segment() argument
2242 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs; in set_sig_data_segment()
2243 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; in set_sig_data_segment()
2245 u32 data_len = wr->sg_list->length; in set_sig_data_segment()
2246 u32 data_key = wr->sg_list->lkey; in set_sig_data_segment()
2247 u64 data_va = wr->sg_list->addr; in set_sig_data_segment()
2251 if (!wr->wr.sig_handover.prot || in set_sig_data_segment()
2252 (data_key == wr->wr.sig_handover.prot->lkey && in set_sig_data_segment()
2253 data_va == wr->wr.sig_handover.prot->addr && in set_sig_data_segment()
2254 data_len == wr->wr.sig_handover.prot->length)) { in set_sig_data_segment()
2288 u32 prot_key = wr->wr.sig_handover.prot->lkey; in set_sig_data_segment()
2289 u64 prot_va = wr->wr.sig_handover.prot->addr; in set_sig_data_segment()
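The test on lines 2251-2254 chooses between the two signature-WQE layouts: a single KLM when there is no separate protection buffer (or the protection data is interleaved in the very same buffer), versus distinct data and protection KLMs, whose keys and addresses the second pair of hits reads out. As a reading aid only, the condition is equivalent to this hypothetical predicate (sig_prot_shares_data() is not in the source, it just names the check):

    /* Hypothetical helper: true when no separate protection buffer exists. */
    static bool sig_prot_shares_data(struct ib_send_wr *wr,
                                     u32 data_key, u64 data_va, u32 data_len)
    {
            struct ib_sge *prot = wr->wr.sig_handover.prot;

            return !prot || (data_key == prot->lkey &&
                             data_va  == prot->addr &&
                             data_len == prot->length);
    }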
2341 struct ib_send_wr *wr, u32 nelements, in set_sig_mkey_segment() argument
2344 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; in set_sig_mkey_segment()
2350 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) | in set_sig_mkey_segment()
2361 struct ib_send_wr *wr, u32 nelements) in set_sig_umr_segment() argument
2372 static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, in set_sig_umr_wr() argument
2375 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr); in set_sig_umr_wr()
2380 if (unlikely(wr->num_sge != 1) || in set_sig_umr_wr()
2381 unlikely(wr->wr.sig_handover.access_flags & in set_sig_umr_wr()
2388 region_len = wr->sg_list->length; in set_sig_umr_wr()
2389 if (wr->wr.sig_handover.prot && in set_sig_umr_wr()
2390 (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || in set_sig_umr_wr()
2391 wr->wr.sig_handover.prot->addr != wr->sg_list->addr || in set_sig_umr_wr()
2392 wr->wr.sig_handover.prot->length != wr->sg_list->length)) in set_sig_umr_wr()
2393 region_len += wr->wr.sig_handover.prot->length; in set_sig_umr_wr()
2400 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1; in set_sig_umr_wr()
2402 set_sig_umr_segment(*seg, wr, klm_oct_size); in set_sig_umr_wr()
2408 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); in set_sig_umr_wr()
2414 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
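Lines 2402-2414 give the assembly order of the signature-handover WQE: UMR control segment, then mkey segment, then the data/BSF section. Between the calls the real code advances the seg/size cursors and wraps at the queue end; a hedged sketch of that glue (segment sizes in 16-byte units, per the mlx5 WQE format):

    set_sig_umr_segment(*seg, wr, klm_oct_size);
    *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
    *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
    if (unlikely(*seg == qp->sq.qend))
            *seg = mlx5_get_send_wqe(qp, 0);

    set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
    *seg += sizeof(struct mlx5_mkey_seg);
    *size += sizeof(struct mlx5_mkey_seg) / 16;
    if (unlikely(*seg == qp->sq.qend))
            *seg = mlx5_get_send_wqe(qp, 0);

    ret = set_sig_data_segment(wr, qp, seg, size);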
2448 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, in set_frwr_li_wr() argument
2454 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0; in set_frwr_li_wr()
2455 if (unlikely(wr->send_flags & IB_SEND_INLINE)) in set_frwr_li_wr()
2458 set_frwr_umr_segment(*seg, wr, li); in set_frwr_li_wr()
2463 set_mkey_segment(*seg, wr, li, &writ); in set_frwr_li_wr()
2469 if (unlikely(wr->wr.fast_reg.page_list_len > in set_frwr_li_wr()
2470 wr->wr.fast_reg.page_list->max_page_list_len)) in set_frwr_li_wr()
2473 set_frwr_pages(*seg, wr, mdev, pd, writ); in set_frwr_li_wr()
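set_frwr_li_wr() drives both fast registration and local invalidate through the same helpers, steered by li; note how writ, computed by set_mkey_segment() from the access flags, is handed on to set_frwr_pages(). The tail of the fast-register branch, as the hits suggest (the cursor arithmetic is an assumption):

    if (!li) {
            if (unlikely(wr->wr.fast_reg.page_list_len >
                         wr->wr.fast_reg.page_list->max_page_list_len))
                    return -ENOMEM;

            set_frwr_pages(*seg, wr, mdev, pd, writ);
            *seg += sizeof(struct mlx5_wqe_data_seg);
            *size += sizeof(struct mlx5_wqe_data_seg) / 16;
    }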
2518 static u8 get_fence(u8 fence, struct ib_send_wr *wr) in get_fence() argument
2520 if (unlikely(wr->opcode == IB_WR_LOCAL_INV && in get_fence()
2521 wr->send_flags & IB_SEND_FENCE)) in get_fence()
2525 if (wr->send_flags & IB_SEND_FENCE) in get_fence()
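get_fence() folds IBTA fence semantics into mlx5 fence modes: a fenced local invalidate gets strong ordering, an explicit IB_SEND_FENCE otherwise maps to the small-and-fence mode, and a fence already in effect is inherited. A reconstruction; the return values are assumed from this era's fence-mode enum:

    static u8 get_fence(u8 fence, struct ib_send_wr *wr)
    {
            if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
                         wr->send_flags & IB_SEND_FENCE))
                    return MLX5_FENCE_MODE_STRONG_ORDERING;

            if (fence) {
                    if (wr->send_flags & IB_SEND_FENCE)
                            return MLX5_FENCE_MODE_SMALL_AND_FENCE;
                    return fence;   /* inherit the fence already in effect */
            }

            return 0;
    }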
2537 struct ib_send_wr *wr, unsigned *idx, in begin_wqe() argument
2551 (*ctrl)->imm = send_ieth(wr); in begin_wqe()
2553 (wr->send_flags & IB_SEND_SIGNALED ? in begin_wqe()
2555 (wr->send_flags & IB_SEND_SOLICITED ? in begin_wqe()
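begin_wqe() claims the next send-queue slot and seeds its control segment; the two matched ternaries build the completion and solicited-event flags. In full, assuming fm_ce_se as the destination field and qp->sq_signal_bits for all-signaled QPs:

    (*ctrl)->imm = send_ieth(wr);
    (*ctrl)->fm_ce_se = qp->sq_signal_bits |
            (wr->send_flags & IB_SEND_SIGNALED ?
             MLX5_WQE_CTRL_CQ_UPDATE : 0) |
            (wr->send_flags & IB_SEND_SOLICITED ?
             MLX5_WQE_CTRL_SOLICITED : 0);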
2588 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in mlx5_ib_post_send() argument
2614 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
2615 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
2618 *bad_wr = wr; in mlx5_ib_post_send()
2623 num_sge = wr->num_sge; in mlx5_ib_post_send()
2627 *bad_wr = wr; in mlx5_ib_post_send()
2631 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); in mlx5_ib_post_send()
2635 *bad_wr = wr; in mlx5_ib_post_send()
2642 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num); in mlx5_ib_post_send()
2647 switch (wr->opcode) { in mlx5_ib_post_send()
2651 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in mlx5_ib_post_send()
2652 wr->wr.rdma.rkey); in mlx5_ib_post_send()
2662 *bad_wr = wr; in mlx5_ib_post_send()
2668 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); in mlx5_ib_post_send()
2669 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); in mlx5_ib_post_send()
2672 *bad_wr = wr; in mlx5_ib_post_send()
2681 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); in mlx5_ib_post_send()
2682 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); in mlx5_ib_post_send()
2685 *bad_wr = wr; in mlx5_ib_post_send()
2693 mr = to_mmr(wr->wr.sig_handover.sig_mr); in mlx5_ib_post_send()
2696 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
2699 *bad_wr = wr; in mlx5_ib_post_send()
2703 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2704 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2710 wr->send_flags &= ~IB_SEND_SIGNALED; in mlx5_ib_post_send()
2711 wr->send_flags |= IB_SEND_SOLICITED; in mlx5_ib_post_send()
2712 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2717 *bad_wr = wr; in mlx5_ib_post_send()
2721 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem, in mlx5_ib_post_send()
2726 *bad_wr = wr; in mlx5_ib_post_send()
2730 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2731 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2733 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2738 *bad_wr = wr; in mlx5_ib_post_send()
2743 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire, in mlx5_ib_post_send()
2748 *bad_wr = wr; in mlx5_ib_post_send()
2752 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2753 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
2764 switch (wr->opcode) { in mlx5_ib_post_send()
2767 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in mlx5_ib_post_send()
2768 wr->wr.rdma.rkey); in mlx5_ib_post_send()
2781 set_datagram_seg(seg, wr); in mlx5_ib_post_send()
2789 if (wr->opcode != MLX5_IB_WR_UMR) { in mlx5_ib_post_send()
2795 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); in mlx5_ib_post_send()
2796 set_reg_umr_segment(seg, wr); in mlx5_ib_post_send()
2801 set_reg_mkey_segment(seg, wr); in mlx5_ib_post_send()
2812 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
2815 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
2818 *bad_wr = wr; in mlx5_ib_post_send()
2830 if (likely(wr->sg_list[i].length)) { in mlx5_ib_post_send()
2831 set_data_ptr_seg(dpseg, wr->sg_list + i); in mlx5_ib_post_send()
2838 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
2839 get_fence(fence, wr), next_fence, in mlx5_ib_post_send()
2840 mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
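Taken together, the mlx5_ib_post_send() hits trace a fixed per-WR pipeline; in outline, keyed to the source lines above (note that the signature-MR path posts three WQEs, the UMR itself plus one SET_PSV each for the memory and wire domains, which is why send_flags is rewritten on lines 2710-2711):

    for (nreq = 0; wr; nreq++, wr = wr->next) {
            /* 2615: reject opcodes outside mlx5_ib_opcode[]       */
            /* 2623: validate num_sge against the queue's max_gs   */
            /* 2631: begin_wqe(): claim a slot, fill ctrl segment  */
            /* 2642: XRC only: add the remote SRQ number           */
            /* 2647-2801: opcode-specific segments (raddr, frwr,   */
            /*            signature handover, datagram, UMR)       */
            /* 2812-2835: payload, inline copy or pointer segments */
            /* 2838-2840: finish_wqe(): fence, wr_id, opcode       */
    }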
2895 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, in mlx5_ib_post_recv() argument
2911 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
2914 *bad_wr = wr; in mlx5_ib_post_recv()
2918 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
2920 *bad_wr = wr; in mlx5_ib_post_recv()
2928 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
2929 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
2942 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
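The receive side is far simpler. A hedged sketch of the loop body assembled from the hits, ignoring the optional WQE-signature path; get_recv_wqe(), mlx5_wq_overflow() and the MLX5_INVALID_LKEY terminator are assumed from the file's context:

    for (nreq = 0; wr; nreq++, wr = wr->next) {
            if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
                    err = -ENOMEM;
                    *bad_wr = wr;
                    goto out;
            }

            if (unlikely(wr->num_sge > qp->rq.max_gs)) {
                    err = -EINVAL;
                    *bad_wr = wr;
                    goto out;
            }

            scat = get_recv_wqe(qp, ind);
            for (i = 0; i < wr->num_sge; i++)
                    set_data_ptr_seg(scat + i, wr->sg_list + i);

            /* Terminate a short scatter list with an invalid lkey. */
            if (i < qp->rq.max_gs) {
                    scat[i].byte_count = 0;
                    scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
                    scat[i].addr = 0;
            }

            qp->rq.wrid[ind] = wr->wr_id;
            ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
    }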