Lines matching refs:qhp (cross-reference hits for the c4iw QP handle qhp in the Linux kernel's drivers/infiniband/hw/cxgb4/qp.c). Each entry gives the in-file line number, the source line, and the enclosing function; "argument" marks lines where qhp is a parameter of that function.

86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)  in set_state()  argument
89 spin_lock_irqsave(&qhp->lock, flag); in set_state()
90 qhp->attr.state = state; in set_state()
91 spin_unlock_irqrestore(&qhp->lock, flag); in set_state()
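
set_state() is the only writer of qhp->attr.state, and it always takes qhp->lock with interrupts disabled around the store, so state transitions serialize with the post and flush paths that read the state under the same lock. A minimal user-space sketch of that discipline, with a pthread mutex standing in for the kernel spinlock (all names are illustrative):

    #include <pthread.h>

    enum qp_state { QP_IDLE, QP_RTS, QP_CLOSING, QP_TERMINATE, QP_ERROR };

    struct qp {
        pthread_mutex_t lock;  /* stands in for spin_lock_irqsave(&qhp->lock) */
        enum qp_state state;   /* written only while holding lock */
    };

    static void set_state(struct qp *qp, enum qp_state state)
    {
        pthread_mutex_lock(&qp->lock);
        qp->state = state;     /* a single store, but kept under the lock so
                                  it orders against readers of the state */
        pthread_mutex_unlock(&qp->lock);
    }
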
597 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, in build_rdma_recv() argument
602 ret = build_isgl((__be64 *)qhp->wq.rq.queue, in build_rdma_recv()
603 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], in build_rdma_recv()
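
The two casts hand build_isgl() the start of the RQ and the address one past its last entry, so the SGL writer can wrap a descriptor that would run off the end of the queue. A hedged sketch of that wrap-at-end write, under the assumption that the bounds exist purely for wraparound (helper name and signature are invented):

    #include <stddef.h>
    #include <stdint.h>

    /* Copy n 64-bit words into a ring bounded by [base, end), wrapping at
     * end; models the (queue, &queue[size]) pair passed to build_isgl(). */
    static uint64_t *ring_write(uint64_t *dst, const uint64_t *src, size_t n,
                                uint64_t *base, uint64_t *end)
    {
        while (n--) {
            if (dst == end)
                dst = base;    /* wrap to the start of the queue */
            *dst++ = *src++;
        }
        return dst;            /* next free slot */
    }
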
705 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_sq_db() argument
709 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
710 spin_lock(&qhp->lock); in ring_kernel_sq_db()
711 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
712 t4_ring_sq_db(&qhp->wq, inc, NULL); in ring_kernel_sq_db()
714 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
715 qhp->wq.sq.wq_pidx_inc += inc; in ring_kernel_sq_db()
717 spin_unlock(&qhp->lock); in ring_kernel_sq_db()
718 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_sq_db()
722 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_rq_db() argument
726 spin_lock_irqsave(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
727 spin_lock(&qhp->lock); in ring_kernel_rq_db()
728 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
729 t4_ring_rq_db(&qhp->wq, inc, NULL); in ring_kernel_rq_db()
731 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
732 qhp->wq.rq.wq_pidx_inc += inc; in ring_kernel_rq_db()
734 spin_unlock(&qhp->lock); in ring_kernel_rq_db()
735 spin_unlock_irqrestore(&qhp->rhp->lock, flags); in ring_kernel_rq_db()
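
ring_kernel_sq_db() and ring_kernel_rq_db() share one pattern: take the device lock, then the QP lock; ring the hardware doorbell only while db_state is NORMAL; otherwise park the QP on db_fc_list and bank the producer-index increment so the doorbell flow-control recovery code can replay it later. A user-space model of that deferral, with pthread mutexes in place of the spinlocks and stubbed helpers (names are illustrative):

    #include <pthread.h>

    enum db_state { DB_NORMAL, DB_STALLED };

    struct dev {
        pthread_mutex_t lock;
        enum db_state db_state;
    };

    struct qp {
        pthread_mutex_t lock;
        struct dev *dev;
        unsigned pidx_inc;          /* increments banked while stalled */
        int on_fc_list;             /* models membership of db_fc_list */
    };

    static void hw_ring_db(struct qp *qp, unsigned inc)
    {
        (void)qp; (void)inc;        /* would poke the doorbell register */
    }

    static void ring_db(struct qp *qp, unsigned inc)
    {
        pthread_mutex_lock(&qp->dev->lock);  /* device lock first */
        pthread_mutex_lock(&qp->lock);       /* then the QP lock */
        if (qp->dev->db_state == DB_NORMAL) {
            hw_ring_db(qp, inc);             /* fast path: ring now */
        } else {
            qp->on_fc_list = 1;              /* park for later replay */
            qp->pidx_inc += inc;             /* remember how much to ring */
        }
        pthread_mutex_unlock(&qp->lock);
        pthread_mutex_unlock(&qp->dev->lock);
    }
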
746 struct c4iw_qp *qhp; in c4iw_post_send() local
753 qhp = to_c4iw_qp(ibqp); in c4iw_post_send()
754 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_send()
755 if (t4_wq_in_error(&qhp->wq)) { in c4iw_post_send()
756 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
759 num_wrs = t4_sq_avail(&qhp->wq); in c4iw_post_send()
761 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
770 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in c4iw_post_send()
771 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_post_send()
776 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
778 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in c4iw_post_send()
789 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
794 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
808 if (!qhp->wq.sq.oldest_read) in c4iw_post_send()
809 qhp->wq.sq.oldest_read = swsqe; in c4iw_post_send()
814 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16, in c4iw_post_send()
816 qhp->rhp->rdev.lldi.adapter_type) ? in c4iw_post_send()
835 swsqe->idx = qhp->wq.sq.pidx; in c4iw_post_send()
838 qhp->sq_sig_all; in c4iw_post_send()
843 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_send()
847 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); in c4iw_post_send()
850 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
854 t4_sq_produce(&qhp->wq, len16); in c4iw_post_send()
857 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_send()
858 t4_ring_sq_db(&qhp->wq, idx, wqe); in c4iw_post_send()
859 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
861 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
862 ring_kernel_sq_db(qhp, idx); in c4iw_post_send()
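
The send path holds qhp->lock for the whole post: fail fast if the WQ is already in error, bound the loop by t4_sq_avail(), build each WQE in place at the current producer index, then ring the doorbell, directly while still locked when the status page says doorbells are on, or through the flow-control helper after dropping the lock. A reduced control-flow sketch of that shape (every name is illustrative, and per-WR bookkeeping such as sw_sq and bad_wr reporting is omitted):

    #include <errno.h>
    #include <pthread.h>

    struct wr { const struct wr *next; };

    struct qp {
        pthread_mutex_t lock;
        int in_error;               /* models t4_wq_in_error() */
        unsigned sq_avail;          /* models t4_sq_avail() */
        int db_on;                  /* models !status_page->db_off */
    };

    static void build_wqe(struct qp *qp, const struct wr *wr)
    {
        (void)qp; (void)wr;         /* write the descriptor at sq pidx */
    }
    static void sq_produce(struct qp *qp)  { qp->sq_avail--; }
    static void ring_db_now(struct qp *qp, unsigned n)      { (void)qp; (void)n; }
    static void ring_db_deferred(struct qp *qp, unsigned n) { (void)qp; (void)n; }

    static int post_send(struct qp *qp, const struct wr *wr)
    {
        unsigned idx = 0;
        int err = 0;

        pthread_mutex_lock(&qp->lock);
        if (qp->in_error) {             /* QP already dead: reject outright */
            pthread_mutex_unlock(&qp->lock);
            return -EINVAL;
        }
        for (; wr; wr = wr->next) {
            if (!qp->sq_avail) {        /* ring full: stop and report */
                err = -ENOMEM;
                break;
            }
            build_wqe(qp, wr);
            sq_produce(qp);             /* advance pidx, consume a slot */
            idx++;
        }
        if (qp->db_on) {
            ring_db_now(qp, idx);       /* ring while still holding the lock */
            pthread_mutex_unlock(&qp->lock);
        } else {
            pthread_mutex_unlock(&qp->lock);
            ring_db_deferred(qp, idx);  /* takes the device lock; see above */
        }
        return err;
    }
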
871 struct c4iw_qp *qhp; in c4iw_post_receive() local
878 qhp = to_c4iw_qp(ibqp); in c4iw_post_receive()
879 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_receive()
880 if (t4_wq_in_error(&qhp->wq)) { in c4iw_post_receive()
881 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
884 num_wrs = t4_rq_avail(&qhp->wq); in c4iw_post_receive()
886 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
895 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + in c4iw_post_receive()
896 qhp->wq.rq.wq_pidx * in c4iw_post_receive()
899 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
907 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
909 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = in c4iw_post_receive()
911 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
913 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); in c4iw_post_receive()
918 wqe->recv.wrid = qhp->wq.rq.pidx; in c4iw_post_receive()
924 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
925 t4_rq_produce(&qhp->wq, len16); in c4iw_post_receive()
930 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
931 t4_ring_rq_db(&qhp->wq, idx, wqe); in c4iw_post_receive()
932 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
934 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
935 ring_kernel_rq_db(qhp, idx); in c4iw_post_receive()
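
The receive side mirrors the send side, plus one extra piece of bookkeeping: the caller's wr_id (and the timestamps, when enabled) is stashed in sw_rq at the current producer index, while the hardware descriptor carries only that index in recv.wrid, so the CQ poller can map a completion back to the original wr_id. A small model of that indirection (sizes and names are illustrative):

    #include <stdint.h>

    #define RQ_SIZE 128u

    struct rq {
        uint64_t sw_wr_id[RQ_SIZE];   /* models sw_rq[pidx].wr_id */
        uint16_t pidx;                /* producer index */
    };

    /* On post: remember the caller's cookie, hand hardware just the slot. */
    static uint16_t rq_post(struct rq *rq, uint64_t wr_id)
    {
        uint16_t slot = rq->pidx;
        rq->sw_wr_id[slot] = wr_id;           /* host-side bookkeeping */
        rq->pidx = (uint16_t)((slot + 1) % RQ_SIZE);
        return slot;                          /* goes into wqe->recv.wrid */
    }

    /* On completion: hardware reports the slot; recover the cookie. */
    static uint64_t rq_complete(const struct rq *rq, uint16_t slot)
    {
        return rq->sw_wr_id[slot];
    }
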
1081 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, in post_terminate() argument
1088 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, in post_terminate()
1089 qhp->ep->hwtid); in post_terminate()
1094 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in post_terminate()
1100 FW_WR_FLOWID_V(qhp->ep->hwtid) | in post_terminate()
1106 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { in post_terminate()
1107 term->layer_etype = qhp->attr.layer_etype; in post_terminate()
1108 term->ecode = qhp->attr.ecode; in post_terminate()
1111 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
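
post_terminate() sends a TERMINATE message on the connection's firmware flow (FW_WR_FLOWID_V(qhp->ep->hwtid)): when the QP attributes already hold a precise MPA/LLP cause it copies layer_etype and ecode through, otherwise the driver falls back to a generic RDMAP error. A hedged sketch of that selection and byte-order packing; the struct layout and the numeric codes here are placeholders, not the FW_RI_WR wire format:

    #include <stdint.h>
    #include <arpa/inet.h>      /* htonl(), standing in for cpu_to_be32() */

    /* Placeholder codes; real values come from the iWARP TERM definitions. */
    enum { MPA_LLP_ERR = 0x12, RDMAP_REMOTE_PROT = 0x01, RDMAP_CANT_COMP = 0x05 };

    struct term_msg {
        uint32_t flowid_be;     /* connection tid, big-endian on the wire */
        uint8_t  layer_etype;   /* protocol layer that raised the error */
        uint8_t  ecode;         /* layer-specific error code */
    };

    static void build_terminate(struct term_msg *msg, uint32_t hwtid,
                                uint8_t layer_etype, uint8_t ecode)
    {
        msg->flowid_be = htonl(hwtid);
        if (layer_etype == MPA_LLP_ERR) {   /* precise cause was recorded */
            msg->layer_etype = layer_etype;
            msg->ecode = ecode;
        } else {                            /* generic RDMAP fallback */
            msg->layer_etype = RDMAP_REMOTE_PROT;
            msg->ecode = RDMAP_CANT_COMP;
        }
    }
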
1117 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, in __flush_qp() argument
1124 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); in __flush_qp()
1128 spin_lock(&qhp->lock); in __flush_qp()
1130 if (qhp->wq.flushed) { in __flush_qp()
1131 spin_unlock(&qhp->lock); in __flush_qp()
1135 qhp->wq.flushed = 1; in __flush_qp()
1138 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp()
1139 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp()
1140 spin_unlock(&qhp->lock); in __flush_qp()
1145 spin_lock(&qhp->lock); in __flush_qp()
1148 sq_flushed = c4iw_flush_sq(qhp); in __flush_qp()
1149 spin_unlock(&qhp->lock); in __flush_qp()
1176 static void flush_qp(struct c4iw_qp *qhp) in flush_qp() argument
1181 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in flush_qp()
1182 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1184 t4_set_wq_in_error(&qhp->wq); in flush_qp()
1185 if (qhp->ibqp.uobject) { in flush_qp()
1199 __flush_qp(qhp, rchp, schp); in flush_qp()
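
flush_qp() marks the WQ in error and hands off to __flush_qp(), which takes the CQ lock and then qhp->lock and tests wq.flushed before setting it, so however many paths race into a flush, the drain runs exactly once. A compact model of that test-and-set-under-lock idempotence (lock names follow the listing; the drain itself is stubbed):

    #include <pthread.h>

    struct qp {
        pthread_mutex_t cq_lock;   /* outer: models rchp->lock */
        pthread_mutex_t lock;      /* inner: models qhp->lock */
        int flushed;
    };

    static void drain(struct qp *qp) { (void)qp; }  /* flush SQ/RQ into CQs */

    static void flush_once(struct qp *qp)
    {
        pthread_mutex_lock(&qp->cq_lock);      /* CQ lock taken first */
        pthread_mutex_lock(&qp->lock);
        if (qp->flushed) {                     /* another path won the race */
            pthread_mutex_unlock(&qp->lock);
            pthread_mutex_unlock(&qp->cq_lock);
            return;
        }
        qp->flushed = 1;                       /* claim the flush */
        drain(qp);
        pthread_mutex_unlock(&qp->lock);
        pthread_mutex_unlock(&qp->cq_lock);
    }
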
1202 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in rdma_fini() argument
1209 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, in rdma_fini()
1232 ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, in rdma_fini()
1233 qhp->wq.sq.qid, __func__); in rdma_fini()
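
rdma_fini() posts an FW RI FINI work request and then blocks in c4iw_wait_for_reply(), keyed by the endpoint's hwtid and the SQ qid, until the firmware acknowledges the teardown or the wait fails. A user-space analogue of that post-then-wait handshake using a condition variable (all names are illustrative):

    #include <pthread.h>

    struct wr_wait {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int done;
        int ret;                    /* status filled in by the reply path */
    };

    /* Reply side: what the completion handler would run. */
    static void wr_wait_complete(struct wr_wait *w, int ret)
    {
        pthread_mutex_lock(&w->lock);
        w->ret = ret;
        w->done = 1;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }

    /* Requester side: post the FINI, then sleep until the reply arrives. */
    static int wait_for_reply(struct wr_wait *w)
    {
        int ret;

        pthread_mutex_lock(&w->lock);
        while (!w->done)
            pthread_cond_wait(&w->cond, &w->lock);
        ret = w->ret;
        pthread_mutex_unlock(&w->lock);
        return ret;
    }
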
1264 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) in rdma_init() argument
1270 PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp, in rdma_init()
1271 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); in rdma_init()
1278 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1280 qhp->attr.max_ird = 0; in rdma_init()
1284 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in rdma_init()
1292 FW_WR_FLOWID_V(qhp->ep->hwtid) | in rdma_init()
1295 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; in rdma_init()
1299 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | in rdma_init()
1300 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); in rdma_init()
1302 if (qhp->attr.mpa_attr.recv_marker_enabled) in rdma_init()
1304 if (qhp->attr.mpa_attr.xmit_marker_enabled) in rdma_init()
1306 if (qhp->attr.mpa_attr.crc_enabled) in rdma_init()
1312 if (!qhp->ibqp.uobject) in rdma_init()
1315 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); in rdma_init()
1316 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); in rdma_init()
1317 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1318 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1319 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); in rdma_init()
1320 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); in rdma_init()
1321 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); in rdma_init()
1322 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); in rdma_init()
1323 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); in rdma_init()
1324 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); in rdma_init()
1325 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); in rdma_init()
1326 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); in rdma_init()
1327 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - in rdma_init()
1329 if (qhp->attr.mpa_attr.initiator) in rdma_init()
1330 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); in rdma_init()
1336 ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, in rdma_init()
1337 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_init()
1341 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
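
rdma_init() fills the FW_RI_WR init payload entirely in big-endian (cpu_to_be16/cpu_to_be32) from qhp->attr, the endpoint's initial sequence numbers, and the RQ's hardware address relative to the RQT base, and ORs the MPA attribute bits (markers, CRC, initiator, p2p type) into flag fields. A sketch of that host-to-wire packing, with htons/htonl standing in for cpu_to_be*() and an invented struct layout:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define F_RECV_MARKERS  0x1u    /* placeholder flag bits */
    #define F_XMIT_MARKERS  0x2u
    #define F_CRC           0x4u

    struct init_wire {
        uint16_t nrqe_be;
        uint32_t qpid_be, sq_eqid_be, rq_eqid_be;
        uint32_t ord_max_be, ird_max_be;
        uint32_t iss_be, irs_be;
        uint8_t  mpa_flags;
    };

    struct qp_attrs {
        uint16_t rqes_posted;
        uint32_t qid, rq_qid, max_ord, max_ird, snd_seq, rcv_seq;
        int recv_markers, xmit_markers, crc;
    };

    static void pack_init(struct init_wire *w, const struct qp_attrs *a)
    {
        w->nrqe_be    = htons(a->rqes_posted); /* RQEs posted before RTS */
        w->qpid_be    = htonl(a->qid);
        w->sq_eqid_be = htonl(a->qid);         /* SQ uses the QP's qid */
        w->rq_eqid_be = htonl(a->rq_qid);
        w->ord_max_be = htonl(a->max_ord);
        w->ird_max_be = htonl(a->max_ird);
        w->iss_be     = htonl(a->snd_seq);     /* initial send sequence */
        w->irs_be     = htonl(a->rcv_seq);     /* initial receive sequence */
        w->mpa_flags  = (a->recv_markers ? F_RECV_MARKERS : 0) |
                        (a->xmit_markers ? F_XMIT_MARKERS : 0) |
                        (a->crc          ? F_CRC          : 0);
    }
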
1347 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in c4iw_modify_qp() argument
1353 struct c4iw_qp_attributes newattr = qhp->attr; in c4iw_modify_qp()
1361 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, in c4iw_modify_qp()
1364 mutex_lock(&qhp->mutex); in c4iw_modify_qp()
1368 if (qhp->attr.state != C4IW_QP_STATE_IDLE) { in c4iw_modify_qp()
1392 qhp->attr = newattr; in c4iw_modify_qp()
1396 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); in c4iw_modify_qp()
1400 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); in c4iw_modify_qp()
1406 if (qhp->attr.state == attrs->next_state) in c4iw_modify_qp()
1409 switch (qhp->attr.state) { in c4iw_modify_qp()
1421 qhp->attr.mpa_attr = attrs->mpa_attr; in c4iw_modify_qp()
1422 qhp->attr.llp_stream_handle = attrs->llp_stream_handle; in c4iw_modify_qp()
1423 qhp->ep = qhp->attr.llp_stream_handle; in c4iw_modify_qp()
1424 set_state(qhp, C4IW_QP_STATE_RTS); in c4iw_modify_qp()
1432 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1433 ret = rdma_init(rhp, qhp); in c4iw_modify_qp()
1438 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1439 flush_qp(qhp); in c4iw_modify_qp()
1449 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); in c4iw_modify_qp()
1450 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1451 set_state(qhp, C4IW_QP_STATE_CLOSING); in c4iw_modify_qp()
1452 ep = qhp->ep; in c4iw_modify_qp()
1456 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1458 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1463 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1464 set_state(qhp, C4IW_QP_STATE_TERMINATE); in c4iw_modify_qp()
1465 qhp->attr.layer_etype = attrs->layer_etype; in c4iw_modify_qp()
1466 qhp->attr.ecode = attrs->ecode; in c4iw_modify_qp()
1467 ep = qhp->ep; in c4iw_modify_qp()
1469 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1473 terminate = qhp->attr.send_term; in c4iw_modify_qp()
1474 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1480 t4_set_wq_in_error(&qhp->wq); in c4iw_modify_qp()
1481 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1485 ep = qhp->ep; in c4iw_modify_qp()
1486 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1502 flush_qp(qhp); in c4iw_modify_qp()
1503 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
1504 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1505 c4iw_put_ep(&qhp->ep->com); in c4iw_modify_qp()
1506 qhp->ep = NULL; in c4iw_modify_qp()
1507 wake_up(&qhp->wait); in c4iw_modify_qp()
1521 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { in c4iw_modify_qp()
1525 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
1536 __func__, qhp->attr.state); in c4iw_modify_qp()
1543 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, in c4iw_modify_qp()
1544 qhp->wq.sq.qid); in c4iw_modify_qp()
1547 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1549 ep = qhp->ep; in c4iw_modify_qp()
1550 qhp->ep = NULL; in c4iw_modify_qp()
1551 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1555 flush_qp(qhp); in c4iw_modify_qp()
1556 wake_up(&qhp->wait); in c4iw_modify_qp()
1558 mutex_unlock(&qhp->mutex); in c4iw_modify_qp()
1561 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); in c4iw_modify_qp()
1580 PDBG("%s exit state %d\n", __func__, qhp->attr.state); in c4iw_modify_qp()
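
c4iw_modify_qp() is the QP state machine: under qhp->mutex it switches on the current state, and each legal arc pairs a set_state() with its side effect: to RTS via rdma_init(), RTS to CLOSING via rdma_fini(), to TERMINATE via a posted terminate message, and to ERROR via flush_qp(). A skeletal model of that dispatch; states and helpers are illustrative and most arcs (and the ep refcounting) are omitted:

    #include <errno.h>

    enum state { ST_IDLE, ST_RTS, ST_CLOSING, ST_TERMINATE, ST_ERROR };

    struct qp { enum state state; };

    static int  do_init(struct qp *qp)  { (void)qp; return 0; } /* rdma_init() */
    static int  do_fini(struct qp *qp)  { (void)qp; return 0; } /* rdma_fini() */
    static void do_flush(struct qp *qp) { (void)qp; }           /* flush_qp()  */

    static int modify(struct qp *qp, enum state next)
    {
        if (qp->state == next)          /* no-op transitions succeed */
            return 0;

        switch (qp->state) {
        case ST_IDLE:
            if (next != ST_RTS && next != ST_ERROR)
                return -EINVAL;         /* reject illegal arcs */
            qp->state = next;
            return next == ST_RTS ? do_init(qp) : 0;
        case ST_RTS:
            if (next == ST_CLOSING) {   /* graceful close */
                qp->state = next;
                return do_fini(qp);
            }
            if (next == ST_TERMINATE || next == ST_ERROR) {
                qp->state = next;       /* abortive: mark and flush */
                do_flush(qp);
                return 0;
            }
            return -EINVAL;
        default:
            return -EINVAL;             /* remaining arcs omitted */
        }
    }
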
1587 struct c4iw_qp *qhp; in c4iw_destroy_qp() local
1591 qhp = to_c4iw_qp(ib_qp); in c4iw_destroy_qp()
1592 rhp = qhp->rhp; in c4iw_destroy_qp()
1595 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) in c4iw_destroy_qp()
1596 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); in c4iw_destroy_qp()
1598 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); in c4iw_destroy_qp()
1599 wait_event(qhp->wait, !qhp->ep); in c4iw_destroy_qp()
1601 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); in c4iw_destroy_qp()
1602 atomic_dec(&qhp->refcnt); in c4iw_destroy_qp()
1603 wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); in c4iw_destroy_qp()
1606 if (!list_empty(&qhp->db_fc_entry)) in c4iw_destroy_qp()
1607 list_del_init(&qhp->db_fc_entry); in c4iw_destroy_qp()
1609 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
1613 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
1616 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); in c4iw_destroy_qp()
1617 kfree(qhp); in c4iw_destroy_qp()
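
Teardown is two waits in sequence: drive the QP out of TERMINATE (or into ERROR) and wait_event() until the endpoint pointer is cleared, then drop the creator's reference and wait for refcnt to reach zero before freeing. A user-space model of that drop-last-reference-then-free handshake, valid here because only the destroyer sleeps on the waitqueue:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int refcnt;
    };

    static void obj_put(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        if (--o->refcnt == 0)
            pthread_cond_broadcast(&o->cond);   /* wake the destroyer */
        pthread_mutex_unlock(&o->lock);
    }

    static void obj_destroy(struct obj *o)
    {
        obj_put(o);                       /* drop the creator's reference */
        pthread_mutex_lock(&o->lock);
        while (o->refcnt)                 /* wait_event(qhp->wait, !refcnt) */
            pthread_cond_wait(&o->cond, &o->lock);
        pthread_mutex_unlock(&o->lock);
        free(o);                          /* nobody can reach it any more */
    }
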
1625 struct c4iw_qp *qhp; in c4iw_create_qp() local
1664 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); in c4iw_create_qp()
1665 if (!qhp) in c4iw_create_qp()
1667 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
1668 qhp->wq.sq.memsize = in c4iw_create_qp()
1670 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); in c4iw_create_qp()
1671 qhp->wq.sq.flush_cidx = -1; in c4iw_create_qp()
1672 qhp->wq.rq.size = rqsize; in c4iw_create_qp()
1673 qhp->wq.rq.memsize = in c4iw_create_qp()
1675 sizeof(*qhp->wq.rq.queue); in c4iw_create_qp()
1678 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); in c4iw_create_qp()
1679 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); in c4iw_create_qp()
1682 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
1691 qhp->rhp = rhp; in c4iw_create_qp()
1692 qhp->attr.pd = php->pdid; in c4iw_create_qp()
1693 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
1694 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; in c4iw_create_qp()
1695 qhp->attr.sq_num_entries = attrs->cap.max_send_wr; in c4iw_create_qp()
1696 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; in c4iw_create_qp()
1697 qhp->attr.sq_max_sges = attrs->cap.max_send_sge; in c4iw_create_qp()
1698 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; in c4iw_create_qp()
1699 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; in c4iw_create_qp()
1700 qhp->attr.state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
1701 qhp->attr.next_state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
1702 qhp->attr.enable_rdma_read = 1; in c4iw_create_qp()
1703 qhp->attr.enable_rdma_write = 1; in c4iw_create_qp()
1704 qhp->attr.enable_bind = 1; in c4iw_create_qp()
1705 qhp->attr.max_ord = 0; in c4iw_create_qp()
1706 qhp->attr.max_ird = 0; in c4iw_create_qp()
1707 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; in c4iw_create_qp()
1708 spin_lock_init(&qhp->lock); in c4iw_create_qp()
1709 mutex_init(&qhp->mutex); in c4iw_create_qp()
1710 init_waitqueue_head(&qhp->wait); in c4iw_create_qp()
1711 atomic_set(&qhp->refcnt, 1); in c4iw_create_qp()
1713 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); in c4iw_create_qp()
1738 if (t4_sq_onchip(&qhp->wq.sq)) { in c4iw_create_qp()
1748 uresp.sqid = qhp->wq.sq.qid; in c4iw_create_qp()
1749 uresp.sq_size = qhp->wq.sq.size; in c4iw_create_qp()
1750 uresp.sq_memsize = qhp->wq.sq.memsize; in c4iw_create_qp()
1751 uresp.rqid = qhp->wq.rq.qid; in c4iw_create_qp()
1752 uresp.rq_size = qhp->wq.rq.size; in c4iw_create_qp()
1753 uresp.rq_memsize = qhp->wq.rq.memsize; in c4iw_create_qp()
1774 mm1->addr = qhp->wq.sq.phys_addr; in c4iw_create_qp()
1775 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); in c4iw_create_qp()
1778 mm2->addr = virt_to_phys(qhp->wq.rq.queue); in c4iw_create_qp()
1779 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); in c4iw_create_qp()
1782 mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; in c4iw_create_qp()
1786 mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; in c4iw_create_qp()
1797 qhp->ibqp.qp_num = qhp->wq.sq.qid; in c4iw_create_qp()
1798 init_timer(&(qhp->timer)); in c4iw_create_qp()
1799 INIT_LIST_HEAD(&qhp->db_fc_entry); in c4iw_create_qp()
1802 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, in c4iw_create_qp()
1803 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, in c4iw_create_qp()
1804 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); in c4iw_create_qp()
1805 return &qhp->ibqp; in c4iw_create_qp()
1817 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); in c4iw_create_qp()
1819 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
1822 kfree(qhp); in c4iw_create_qp()
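
Creation sizes the queues before allocating them: the SQ gets its entries plus a small spare tail (16 extra __be64 slots in this snapshot), the RQ gets its entries, and for kernel QPs both memsizes are rounded up to a full page so the rings can be mapped and managed in whole pages. A sketch of that sizing arithmetic with placeholder entry sizes:

    #include <stddef.h>
    #include <stdint.h>

    #define SQ_ENTRY  64u              /* placeholder per-WQE footprint */
    #define RQ_ENTRY  64u
    #define PAGE_SZ   4096u

    static size_t round_up_page(size_t n)
    {
        return (n + PAGE_SZ - 1) & ~(size_t)(PAGE_SZ - 1);
    }

    static void size_queues(unsigned sqsize, unsigned rqsize,
                            size_t *sq_memsize, size_t *rq_memsize)
    {
        /* SQ: entries plus a 16-slot spare tail, as in the listing above. */
        *sq_memsize = (size_t)sqsize * SQ_ENTRY + 16 * sizeof(uint64_t);
        *rq_memsize = (size_t)rqsize * RQ_ENTRY;

        /* Kernel QPs round to whole pages for mapping and DMA bookkeeping. */
        *sq_memsize = round_up_page(*sq_memsize);
        *rq_memsize = round_up_page(*rq_memsize);
    }
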
1830 struct c4iw_qp *qhp; in c4iw_ib_modify_qp() local
1845 qhp = to_c4iw_qp(ibqp); in c4iw_ib_modify_qp()
1846 rhp = qhp->rhp; in c4iw_ib_modify_qp()
1875 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); in c4iw_ib_modify_qp()
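
c4iw_ib_modify_qp() is a thin adapter: it translates the verbs attr_mask and ib_qp_attr into the driver's C4IW_QP_ATTR_* mask and c4iw_qp_attributes and delegates to c4iw_modify_qp(). A minimal sketch of that mask translation; the bit values and names are invented for illustration:

    #include <stdint.h>

    #define IB_MASK_STATE       0x1u   /* placeholder for IB_QP_STATE */
    #define DRV_ATTR_NEXT_STATE 0x1u   /* placeholder for C4IW_QP_ATTR_NEXT_STATE */

    struct drv_attrs { int next_state; };

    static uint32_t translate_mask(uint32_t ib_mask, int ib_state,
                                   struct drv_attrs *out)
    {
        uint32_t drv_mask = 0;

        if (ib_mask & IB_MASK_STATE) {
            out->next_state = ib_state;    /* verbs state -> driver state */
            drv_mask |= DRV_ATTR_NEXT_STATE;
        }
        /* further IB_QP_* bits would map across the same way */
        return drv_mask;
    }
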
1887 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_ib_query_qp() local
1891 attr->qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
1892 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; in c4iw_ib_query_qp()
1893 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; in c4iw_ib_query_qp()
1894 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; in c4iw_ib_query_qp()
1895 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; in c4iw_ib_query_qp()
1897 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; in c4iw_ib_query_qp()