Lines matching refs: wr

Identifier search hits for "wr" in the Chelsio cxgb4 (c4iw) RDMA driver's queue-pair work-request path. Each hit shows the original source line number, the matching code, and the enclosing function; "argument" marks lines where wr is declared as a function parameter.
389 struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
397 for (i = 0; i < wr->num_sge; i++) { in build_immd()
398 if ((plen + wr->sg_list[i].length) > max) in build_immd()
400 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
401 plen += wr->sg_list[i].length; in build_immd()
402 rem = wr->sg_list[i].length; in build_immd()
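The build_immd() hits above are the inline-send path: the driver walks wr->sg_list[], copies each segment's payload straight into the WQE (bounded by max), and returns the total byte count through *plenp. A minimal caller-side sketch that would exercise this path follows; the helper name is hypothetical, buf/len are assumed to fit the device's inline limit, and the non-const ib_post_send() prototype matches the kernel era of this listing.

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: post a small SEND with IB_SEND_INLINE so the
     * provider (build_immd() above) copies the payload into the WQE
     * instead of referencing it through the lkey. */
    static int post_inline_send(struct ib_qp *qp, void *buf, u32 len, u64 wr_id)
    {
            struct ib_sge sge = {
                    .addr   = (u64)(unsigned long)buf, /* kernel VA; copied, not DMA-mapped */
                    .length = len,
                    .lkey   = 0,                       /* ignored on the inline path */
            };
            struct ib_send_wr wr = {
                    .wr_id      = wr_id,
                    .sg_list    = &sge,
                    .num_sge    = 1,
                    .opcode     = IB_WR_SEND,
                    .send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr, &bad_wr);
    }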
459 struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
465 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
467 switch (wr->opcode) { in build_rdma_send()
469 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
478 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
484 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
494 if (wr->num_sge) { in build_rdma_send()
495 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_send()
496 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
506 wr->sg_list, wr->num_sge, &plen); in build_rdma_send()
510 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_send()
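build_rdma_send() dispatches on wr->opcode, honours IB_SEND_SOLICITED, takes the STag to invalidate from wr->ex.invalidate_rkey for SEND_WITH_INV, and chooses the inline or SGL encoding based on IB_SEND_INLINE, all capped at T4_MAX_SEND_SGE entries. Below is a sketch of a caller taking the non-inline SEND_WITH_INV branch; dma_addr/lkey are assumed to come from an existing registration and the helper name is illustrative.

    #include <rdma/ib_verbs.h>

    /* Hypothetical caller: SEND that asks the responder to invalidate one
     * of our STags on receipt (the stag_inv field seen above). */
    static int post_send_with_inv(struct ib_qp *qp, u64 dma_addr, u32 len,
                                  u32 lkey, u32 rkey_to_inv)
    {
            struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
            struct ib_send_wr wr = {
                    .sg_list            = &sge,
                    .num_sge            = 1,
                    .opcode             = IB_WR_SEND_WITH_INV,
                    .send_flags         = IB_SEND_SIGNALED,
                    .ex.invalidate_rkey = rkey_to_inv,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr, &bad_wr);
    }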
526 struct ib_send_wr *wr, u8 *len16) in build_rdma_write() argument
532 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_write()
535 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_write()
536 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); in build_rdma_write()
537 if (wr->num_sge) { in build_rdma_write()
538 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_write()
539 ret = build_immd(sq, wqe->write.u.immd_src, wr, in build_rdma_write()
549 wr->sg_list, wr->num_sge, &plen); in build_rdma_write()
553 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write()
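build_rdma_write() reads the sink STag and address from the rdma_wr() container wrapped around the ib_send_wr, and shares the inline/SGL split with the send path. A caller-side sketch, with the local registration and the peer's rkey/remote_addr assumed to be known:

    #include <rdma/ib_verbs.h>

    /* Hypothetical caller: RDMA WRITE of one local SGE to a remote buffer.
     * The provider finds the sink via rdma_wr(wr)->remote_addr / ->rkey. */
    static int post_rdma_write(struct ib_qp *qp, u64 local_dma, u32 lkey,
                               u32 len, u64 remote_addr, u32 rkey)
    {
            struct ib_sge sge = { .addr = local_dma, .length = len, .lkey = lkey };
            struct ib_rdma_wr wr = {
                    .wr = {
                            .sg_list    = &sge,
                            .num_sge    = 1,
                            .opcode     = IB_WR_RDMA_WRITE,
                            .send_flags = IB_SEND_SIGNALED,
                    },
                    .remote_addr = remote_addr,
                    .rkey        = rkey,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr.wr, &bad_wr);
    }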
568 static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) in build_rdma_read() argument
570 if (wr->num_sge > 1) in build_rdma_read()
572 if (wr->num_sge) { in build_rdma_read()
573 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_read()
574 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr in build_rdma_read()
576 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); in build_rdma_read()
577 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); in build_rdma_read()
578 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); in build_rdma_read()
579 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr in build_rdma_read()
581 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); in build_rdma_read()
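build_rdma_read() rejects num_sge > 1, so each READ on this device lands in a single local SGE; the source is named by rdma_wr(wr)->rkey / remote_addr and the sink by sg_list[0] (lkey, length, and the address split into hi/lo halves). A matching caller-side sketch under the same assumptions as above:

    #include <rdma/ib_verbs.h>

    /* Hypothetical caller: RDMA READ from a remote buffer into one local SGE. */
    static int post_rdma_read(struct ib_qp *qp, u64 local_dma, u32 lkey,
                              u32 len, u64 remote_addr, u32 rkey)
    {
            struct ib_sge sge = { .addr = local_dma, .length = len, .lkey = lkey };
            struct ib_rdma_wr wr = {
                    .wr = {
                            .sg_list    = &sge,
                            .num_sge    = 1,  /* build_rdma_read() allows at most one */
                            .opcode     = IB_WR_RDMA_READ,
                            .send_flags = IB_SEND_SIGNALED,
                    },
                    .remote_addr = remote_addr,
                    .rkey        = rkey,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr.wr, &bad_wr);
    }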
598 struct ib_recv_wr *wr, u8 *len16) in build_rdma_recv() argument
604 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_rdma_recv()
608 wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_rdma_recv()
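build_rdma_recv() simply encodes wr->sg_list as an ISGL and rounds the WQE length up to 16-byte units. The caller-side post is correspondingly simple; the receive buffer is assumed to be DMA-mapped and covered by lkey, and the helper name is illustrative:

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: post one receive buffer to the RQ. */
    static int post_recv_buf(struct ib_qp *qp, u64 dma_addr, u32 len,
                             u32 lkey, u64 wr_id)
    {
            struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
            struct ib_recv_wr wr = {
                    .wr_id   = wr_id,
                    .sg_list = &sge,
                    .num_sge = 1,
            };
            struct ib_recv_wr *bad_wr;

            return ib_post_recv(qp, &wr, &bad_wr);
    }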
613 struct ib_reg_wr *wr, u8 *len16, u8 t5dev) in build_memreg() argument
615 struct c4iw_mr *mhp = to_c4iw_mr(wr->mr); in build_memreg()
626 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; in build_memreg()
628 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); in build_memreg()
631 wqe->fr.stag = cpu_to_be32(wr->key); in build_memreg()
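build_memreg() converts an ib_reg_wr into a firmware fast-register WQE: the page-size shift comes from wr->mr->page_size, the permissions from wr->access, and the STag from wr->key. A sketch of the posting side, assuming the MR has already been allocated and its pages mapped (for example with ib_map_mr_sg()) before this call; the helper name and access mask are illustrative:

    #include <rdma/ib_verbs.h>

    /* Hypothetical caller: fast-register an already-mapped MR so later
     * WRs can reference it by rkey. */
    static int post_fastreg(struct ib_qp *qp, struct ib_mr *mr)
    {
            struct ib_reg_wr wr = {
                    .wr = {
                            .opcode     = IB_WR_REG_MR,
                            .send_flags = IB_SEND_SIGNALED,
                    },
                    .mr     = mr,
                    .key    = mr->rkey,
                    .access = IB_ACCESS_LOCAL_WRITE |
                              IB_ACCESS_REMOTE_READ |
                              IB_ACCESS_REMOTE_WRITE,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr.wr, &bad_wr);
    }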
677 static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, in build_inv_stag() argument
680 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_inv_stag()
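build_inv_stag() needs only the STag to invalidate, taken from wr->ex.invalidate_rkey. The corresponding caller-side WR is a bare IB_WR_LOCAL_INV (sketch, helper name assumed):

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: locally invalidate an STag/rkey. */
    static int post_local_inv(struct ib_qp *qp, u32 rkey)
    {
            struct ib_send_wr wr = {
                    .opcode             = IB_WR_LOCAL_INV,
                    .send_flags         = IB_SEND_SIGNALED,
                    .ex.invalidate_rkey = rkey,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr, &bad_wr);
    }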
739 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in c4iw_post_send() argument
764 while (wr) { in c4iw_post_send()
767 *bad_wr = wr; in c4iw_post_send()
774 if (wr->send_flags & IB_SEND_SOLICITED) in c4iw_post_send()
776 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
779 switch (wr->opcode) { in c4iw_post_send()
782 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
785 if (wr->opcode == IB_WR_SEND) in c4iw_post_send()
789 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
794 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
800 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) in c4iw_post_send()
804 err = build_rdma_read(wqe, wr, &len16); in c4iw_post_send()
807 swsqe->read_len = wr->sg_list[0].length; in c4iw_post_send()
814 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16, in c4iw_post_send()
820 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
824 err = build_inv_stag(wqe, wr, &len16); in c4iw_post_send()
828 wr->opcode); in c4iw_post_send()
832 *bad_wr = wr; in c4iw_post_send()
837 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || in c4iw_post_send()
840 swsqe->wr_id = wr->wr_id; in c4iw_post_send()
850 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
852 wr = wr->next; in c4iw_post_send()
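c4iw_post_send() walks the wr->next chain, applies per-WR flags (SOLICITED, SIGNALED or qhp->sq_sig_all, FENCE), dispatches each opcode to the builders above, and on any failure sets *bad_wr to the WR it could not post, leaving everything before it queued. A sketch of posting a two-WR chain and using bad_wr to locate the failure point; the helper, SGE parameters and error handling are illustrative:

    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical caller: post two SENDs in one call; only the tail
     * requests a completion (unless the QP was created with sq_sig_all). */
    static int post_send_chain(struct ib_qp *qp, struct ib_sge *sge0,
                               struct ib_sge *sge1)
    {
            struct ib_send_wr second = {
                    .wr_id      = 2,
                    .sg_list    = sge1,
                    .num_sge    = 1,
                    .opcode     = IB_WR_SEND,
                    .send_flags = IB_SEND_SIGNALED,
            };
            struct ib_send_wr first = {
                    .wr_id   = 1,
                    .next    = &second,
                    .sg_list = sge0,
                    .num_sge = 1,
                    .opcode  = IB_WR_SEND,
            };
            struct ib_send_wr *bad_wr = NULL;
            int ret;

            ret = ib_post_send(qp, &first, &bad_wr);
            if (ret && bad_wr)
                    pr_err("post_send failed at wr_id %llu: %d\n",
                           (unsigned long long)bad_wr->wr_id, ret);
            return ret;
    }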
867 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, in c4iw_post_receive() argument
889 while (wr) { in c4iw_post_receive()
890 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_receive()
892 *bad_wr = wr; in c4iw_post_receive()
899 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
903 *bad_wr = wr; in c4iw_post_receive()
907 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
924 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
927 wr = wr->next; in c4iw_post_receive()
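c4iw_post_receive() mirrors this on the RQ: it walks wr->next, rejects any WR with num_sge above T4_MAX_RECV_SGE, and reports the first unposted WR through *bad_wr. A sketch of replenishing the RQ with one chained call; the arrays are assumed to be caller-allocated, DMA-mapped and covered by the SGEs' lkeys, and the helper name is illustrative:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: chain n receive WRs together and post them in
     * a single ib_post_recv() call. */
    static int refill_recv_queue(struct ib_qp *qp, struct ib_sge *sges,
                                 struct ib_recv_wr *wrs, int n)
    {
            struct ib_recv_wr *bad_wr = NULL;
            int i, ret;

            if (n <= 0)
                    return -EINVAL;

            for (i = 0; i < n; i++) {
                    wrs[i].wr_id   = i;
                    wrs[i].sg_list = &sges[i];
                    wrs[i].num_sge = 1;
                    wrs[i].next    = (i + 1 < n) ? &wrs[i + 1] : NULL;
            }

            ret = ib_post_recv(qp, wrs, &bad_wr);
            if (ret && bad_wr)
                    pr_warn("recv chain stopped at wr_id %llu: %d\n",
                            (unsigned long long)bad_wr->wr_id, ret);
            return ret;
    }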