Lines matching refs:wr — every line in the cxgb3 iwch queue-pair code (iwch_qp.c) that references the work-request pointer wr, listed with its source line number and enclosing function.

42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,  in build_rdma_send()  argument
48 switch (wr->opcode) { in build_rdma_send()
50 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
57 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
66 if (wr->num_sge > T3_MAX_SGE) in build_rdma_send()
72 for (i = 0; i < wr->num_sge; i++) { in build_rdma_send()
73 if ((plen + wr->sg_list[i].length) < plen) in build_rdma_send()
76 plen += wr->sg_list[i].length; in build_rdma_send()
77 wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_send()
78 wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); in build_rdma_send()
79 wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr); in build_rdma_send()
81 wqe->send.num_sgle = cpu_to_be32(wr->num_sge); in build_rdma_send()
82 *flit_cnt = 4 + ((wr->num_sge) << 1); in build_rdma_send()
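
The build_rdma_send() references above show the driver's SGL-copy pattern: reject more than T3_MAX_SGE entries, guard the running payload length against unsigned wrap-around, copy each lkey/length/address into the WQE in big-endian form, and size the descriptor at 4 + 2*num_sge flits. Below is a minimal, self-contained sketch of that loop; the struct layouts, the T3_MAX_SGE value, and the glibc htobe32()/htobe64() helpers are stand-ins for the driver's real types and cpu_to_be*() conversions, not the actual t3_wr definitions.

/* Sketch of the SGL-copy pattern referenced above (lines 66-82).
 * Simplified stand-in types; not the driver's union t3_wr / struct ib_sge. */
#include <stdint.h>
#include <errno.h>
#include <endian.h>   /* htobe32()/htobe64() (glibc), standing in for cpu_to_be32/64 */
#include <stdio.h>

#define T3_MAX_SGE 4                  /* assumed limit; the driver defines the real one */

struct sge     { uint32_t lkey; uint32_t length; uint64_t addr; };  /* like struct ib_sge */
struct wqe_sge { uint32_t stag; uint32_t len;    uint64_t to;   };  /* big-endian on the wire */

int build_send_sgl(struct wqe_sge *sgl, const struct sge *sg_list, int num_sge,
                   uint32_t *plen_out, uint8_t *flit_cnt)
{
        uint32_t plen = 0;
        int i;

        if (num_sge > T3_MAX_SGE)
                return -EINVAL;

        for (i = 0; i < num_sge; i++) {
                /* unsigned wrap-around check, as on line 73: new total < old total */
                if (plen + sg_list[i].length < plen)
                        return -EMSGSIZE;
                plen += sg_list[i].length;

                sgl[i].stag = htobe32(sg_list[i].lkey);
                sgl[i].len  = htobe32(sg_list[i].length);
                sgl[i].to   = htobe64(sg_list[i].addr);
        }

        *plen_out = plen;
        *flit_cnt = 4 + (num_sge << 1);   /* line 82: header flits plus two flits per SGE */
        return 0;
}

int main(void)
{
        struct sge sg[2] = { { 0x1001, 4096, 0x10000 }, { 0x1002, 512, 0x20000 } };
        struct wqe_sge sgl[T3_MAX_SGE];
        uint32_t plen;
        uint8_t flits;

        if (!build_send_sgl(sgl, sg, 2, &plen, &flits))
                printf("plen=%u flit_cnt=%u\n", (unsigned)plen, (unsigned)flits);
        return 0;
}
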
87 static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, in build_rdma_write() argument
92 if (wr->num_sge > T3_MAX_SGE) in build_rdma_write()
98 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); in build_rdma_write()
99 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); in build_rdma_write()
101 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { in build_rdma_write()
103 wqe->write.sgl[0].stag = wr->ex.imm_data; in build_rdma_write()
109 for (i = 0; i < wr->num_sge; i++) { in build_rdma_write()
110 if ((plen + wr->sg_list[i].length) < plen) { in build_rdma_write()
113 plen += wr->sg_list[i].length; in build_rdma_write()
115 cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_write()
117 cpu_to_be32(wr->sg_list[i].length); in build_rdma_write()
119 cpu_to_be64(wr->sg_list[i].addr); in build_rdma_write()
121 wqe->write.num_sgle = cpu_to_be32(wr->num_sge); in build_rdma_write()
122 *flit_cnt = 5 + ((wr->num_sge) << 1); in build_rdma_write()
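
For build_rdma_write() the per-SGE copy is the same as the send path; what the references above add is the remote target (the rkey into stag_sink, the remote VA into to_sink), a descriptor one flit longer (5 + 2*num_sge, line 122), and, for IB_WR_RDMA_WRITE_WITH_IMM, the immediate data parked in the first SGL slot (line 103) without a byte-order conversion, since the work request already carries it big-endian. A small sketch of just those write-specific fields, using the same kind of simplified stand-in struct as above:

/* Write-specific header fields referenced above (lines 98-103); simplified
 * stand-in layout, not the driver's real wqe->write structure. */
#include <stdint.h>
#include <endian.h>

struct wqe_write_hdr {
        uint32_t stag_sink;     /* remote rkey */
        uint64_t to_sink;       /* remote virtual address */
        uint32_t first_slot;    /* first SGL stag slot; doubles as the immediate carrier */
};

void set_write_target(struct wqe_write_hdr *w, uint32_t rkey, uint64_t remote_addr,
                      int with_imm, uint32_t imm_data_be)
{
        w->stag_sink = htobe32(rkey);
        w->to_sink   = htobe64(remote_addr);
        if (with_imm)
                /* already big-endian in the work request, so copied as-is (line 103) */
                w->first_slot = imm_data_be;
}
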
128 static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, in build_rdma_read() argument
131 if (wr->num_sge > 1) in build_rdma_read()
134 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) in build_rdma_read()
140 wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey); in build_rdma_read()
141 wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr); in build_rdma_read()
142 wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey); in build_rdma_read()
143 wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length); in build_rdma_read()
144 wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr); in build_rdma_read()
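
build_rdma_read() is the one builder that refuses more than a single SGE (line 131): a T3 read WQE names exactly one remote region (rem_stag/rem_to) and one local landing buffer (local_stag/local_len/local_to), and IB_WR_RDMA_READ_WITH_INV merely selects a different hardware opcode. A sketch of that fill-in, again with simplified stand-in types:

/* Single-SGE RDMA READ fill-in referenced above (lines 131-144); simplified
 * stand-in for the driver's wqe->read fields. */
#include <stdint.h>
#include <errno.h>
#include <endian.h>

struct sge { uint32_t lkey; uint32_t length; uint64_t addr; };

struct wqe_read_sketch {
        uint32_t rem_stag;      /* remote rkey */
        uint64_t rem_to;        /* remote virtual address */
        uint32_t local_stag;    /* local lkey */
        uint32_t local_len;
        uint64_t local_to;
};

int build_read_sketch(struct wqe_read_sketch *r, uint32_t rkey, uint64_t remote_addr,
                      const struct sge *sg, int num_sge)
{
        if (num_sge > 1)
                return -EINVAL;         /* line 131: one local SGE per read WQE */

        r->rem_stag   = htobe32(rkey);
        r->rem_to     = htobe64(remote_addr);
        r->local_stag = htobe32(sg[0].lkey);
        r->local_len  = htobe32(sg[0].length);
        r->local_to   = htobe64(sg[0].addr);
        return 0;
}
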
149 static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, in build_fastreg() argument
155 if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH) in build_fastreg()
158 wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey); in build_fastreg()
159 wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length); in build_fastreg()
160 wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); in build_fastreg()
162 cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff); in build_fastreg()
164 V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) | in build_fastreg()
165 V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) | in build_fastreg()
167 V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags))); in build_fastreg()
169 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) { in build_fastreg()
178 0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG, in build_fastreg()
183 *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
185 *flit_cnt = 5 + wr->wr.fast_reg.page_list_len; in build_fastreg()
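
The build_fastreg() references show the fast-register WR being flattened into the WQE: rkey, length, and the 64-bit iova_start split into high/low 32-bit halves, a packed control word built from the V_FR_* shift macros, and the page list copied out as big-endian 64-bit entries. The one encoding detail visible in the listing is V_FR_PAGE_SIZE(page_shift - 12), i.e. the page size is stored as a log2 relative to 4 KiB. The sketch below packs such a word; the field widths and shift positions are illustrative guesses, not the hardware's actual layout:

/* Illustrative packing of a fast-register control word (lines 164-167).
 * The shift positions and the perms encoding here are placeholders; only the
 * page_shift - 12 convention is taken from the listing above. */
#include <stdint.h>
#include <endian.h>

#define FR_PAGE_COUNT(x) ((uint32_t)(x) << 0)   /* hypothetical bit positions */
#define FR_PAGE_SIZE(x)  ((uint32_t)(x) << 8)
#define FR_PERMS(x)      ((uint32_t)(x) << 16)

uint32_t pack_fastreg_word(unsigned int page_list_len, unsigned int page_shift,
                           unsigned int tpt_perms)
{
        return htobe32(FR_PAGE_COUNT(page_list_len) |
                       FR_PAGE_SIZE(page_shift - 12) |  /* 4 KiB pages encode as 0 */
                       FR_PERMS(tpt_perms));
}
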
191 static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, in build_inv_stag() argument
194 wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_inv_stag()
248 struct ib_recv_wr *wr) in build_rdma_recv() argument
254 err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr, in build_rdma_recv()
262 wqe->recv.num_sgle = cpu_to_be32(wr->num_sge); in build_rdma_recv()
263 for (i = 0; i < wr->num_sge; i++) { in build_rdma_recv()
264 wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_recv()
265 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); in build_rdma_recv()
268 wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) & in build_rdma_recv()
281 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_rdma_recv()
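
Besides copying the SGL (after iwch_sgl2pbl_map() has turned it into page-list entries), build_rdma_recv() records the caller's wr_id in a software ring (line 281) so the completion path can hand it back later; the slot index is the write pointer masked to the ring size, a power of two given by rq_size_log2. A self-contained sketch of that bookkeeping, with stand-in names for the qhp->wq fields:

/* wr_id bookkeeping referenced on line 281; simplified stand-in for the
 * driver's receive-queue state. */
#include <stdint.h>

struct recv_slot { uint64_t wr_id; };

struct recv_ring {
        uint32_t rq_wptr;               /* monotonically increasing write pointer */
        uint32_t rq_size_log2;          /* ring holds 1 << rq_size_log2 slots */
        struct recv_slot *slots;
};

void remember_wr_id(struct recv_ring *rq, uint64_t wr_id)
{
        uint32_t idx = rq->rq_wptr & ((1u << rq->rq_size_log2) - 1);

        rq->slots[idx].wr_id = wr_id;   /* matched up again when the completion arrives */
        rq->rq_wptr++;
}
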
288 struct ib_recv_wr *wr) in build_zero_stag_recv() argument
310 wqe->recv.num_sgle = cpu_to_be32(wr->num_sge); in build_zero_stag_recv()
312 for (i = 0; i < wr->num_sge; i++) { in build_zero_stag_recv()
321 if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN) in build_zero_stag_recv()
328 if (wr->sg_list[i].lkey != 0) in build_zero_stag_recv()
331 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); in build_zero_stag_recv()
332 wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr); in build_zero_stag_recv()
344 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_zero_stag_recv()
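
build_zero_stag_recv() handles receives posted with the reserved lkey of 0: every SGE on that path must really carry lkey 0 (line 328) and must fit within one page-buffer entry of the zero-STag region (line 321). A small validation sketch; the T3_STAG0_MAX_PBE_LEN value below is a placeholder, the real constant lives in the driver's headers:

/* Zero-STag SGE validation referenced above (lines 321-328); the length limit
 * is a placeholder value, not the driver's definition. */
#include <stdint.h>
#include <errno.h>

#define T3_STAG0_MAX_PBE_LEN (128u * 1024 * 1024)       /* placeholder */

struct sge { uint32_t lkey; uint32_t length; uint64_t addr; };

int check_zero_stag_sges(const struct sge *sg, int num_sge)
{
        int i;

        for (i = 0; i < num_sge; i++) {
                if (sg[i].length > T3_STAG0_MAX_PBE_LEN)
                        return -EINVAL;         /* one PBE per SGE in this mode */
                if (sg[i].lkey != 0)
                        return -EINVAL;         /* real lkeys are rejected on the stag-0 path */
        }
        return 0;
}
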
350 int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in iwch_post_send() argument
379 while (wr) { in iwch_post_send()
387 if (wr->send_flags & IB_SEND_SOLICITED) in iwch_post_send()
389 if (wr->send_flags & IB_SEND_SIGNALED) in iwch_post_send()
393 switch (wr->opcode) { in iwch_post_send()
396 if (wr->send_flags & IB_SEND_FENCE) in iwch_post_send()
399 err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt); in iwch_post_send()
404 err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt); in iwch_post_send()
410 err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt); in iwch_post_send()
419 err = build_fastreg(wqe, wr, &t3_wr_flit_cnt, in iwch_post_send()
423 if (wr->send_flags & IB_SEND_FENCE) in iwch_post_send()
426 err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt); in iwch_post_send()
430 wr->opcode); in iwch_post_send()
436 sqp->wr_id = wr->wr_id; in iwch_post_send()
440 sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED); in iwch_post_send()
447 __func__, (unsigned long long) wr->wr_id, idx, in iwch_post_send()
450 wr = wr->next; in iwch_post_send()
461 *bad_wr = wr; in iwch_post_send()
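
iwch_post_send() shows the standard verbs posting contract: work requests arrive as a singly linked chain, each one is translated according to its opcode and send_flags (solicited, signaled, fence), and on the first failure *bad_wr is pointed at the offending request so the caller knows where posting stopped; iwch_post_receive() below follows the same chain-walking and *bad_wr convention, dispatching on the first SGE's lkey instead of an opcode. A stripped-down sketch of that loop, with placeholder opcodes standing in for the IB_WR_* values:

/* Chain-walking and *bad_wr convention referenced above (lines 379-461);
 * simplified stand-in for struct ib_send_wr and the IB_WR_* opcodes. */
#include <stdint.h>
#include <errno.h>

struct send_wr {
        struct send_wr *next;   /* caller-provided singly linked chain */
        uint64_t wr_id;
        int opcode;
};

enum { OP_SEND, OP_WRITE, OP_READ };    /* placeholder opcodes */

static int build_one_wqe(const struct send_wr *wr)
{
        switch (wr->opcode) {
        case OP_SEND:
        case OP_WRITE:
        case OP_READ:
                return 0;               /* the driver builds the matching WQE here */
        default:
                return -EINVAL;         /* unsupported opcode stops the chain */
        }
}

int post_send_sketch(struct send_wr *wr, struct send_wr **bad_wr)
{
        int err = 0;

        while (wr) {
                err = build_one_wqe(wr);
                if (err)
                        break;
                wr = wr->next;
        }
        if (err)
                *bad_wr = wr;           /* first work request that was not posted */
        return err;
}
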
465 int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, in iwch_post_receive() argument
484 if (!wr) { in iwch_post_receive()
489 while (wr) { in iwch_post_receive()
490 if (wr->num_sge > T3_MAX_SGE) { in iwch_post_receive()
497 if (wr->sg_list[0].lkey) in iwch_post_receive()
498 err = build_rdma_recv(qhp, wqe, wr); in iwch_post_receive()
500 err = build_zero_stag_recv(qhp, wqe, wr); in iwch_post_receive()
511 "wqe %p \n", __func__, (unsigned long long) wr->wr_id, in iwch_post_receive()
515 wr = wr->next; in iwch_post_receive()
524 *bad_wr = wr; in iwch_post_receive()