Lines Matching refs:send_cq

971 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); in create_qp_common()
1021 if (init_attr->send_cq) in create_qp_common()
1022 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); in create_qp_common()
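Taken together, the create_qp_common() hits show the send CQ consumed twice at creation time: once to size the send-completion entries, once to wire its CQ number (big-endian, as the firmware expects) into the QP context. A minimal sketch of that pattern, assuming the surrounding allocation and error handling not shown in the hits above:

	/* Sketch only: the guard and surrounding context are assumptions. */
	if (init_attr->send_cq) {
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
		/* cqn_send is stored big-endian in the firmware QP context. */
		in->ctx.cqn_send =
			cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
	}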
1056 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) in mlx5_ib_lock_cqs() argument
1057 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx5_ib_lock_cqs()
1059 if (send_cq) { in mlx5_ib_lock_cqs()
1061 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1062 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
1065 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1066 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
1070 spin_lock_nested(&send_cq->lock, in mlx5_ib_lock_cqs()
1074 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
1079 __acquire(&send_cq->lock); in mlx5_ib_lock_cqs()
1081 __acquire(&send_cq->lock); in mlx5_ib_lock_cqs()
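The lock helper's fragments outline the usual ABBA-deadlock avoidance: when both CQs exist and differ, the lower CQN is locked first and the second lock is taken with spin_lock_nested() to keep lockdep quiet; a QP whose send and receive CQ are the same takes the lock once; an absent CQ only gets a sparse __acquire() so the __acquires() annotation stays balanced. A plausible reconstruction from the fragments (the branch structure is inferred, not quoted):

	static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
				     struct mlx5_ib_cq *recv_cq)
		__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
	{
		if (send_cq) {
			if (recv_cq) {
				if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
					/* Lower CQN first; second lock nested. */
					spin_lock_irq(&send_cq->lock);
					spin_lock_nested(&recv_cq->lock,
							 SINGLE_DEPTH_NESTING);
				} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
					/* Same CQ: one real lock, one annotation. */
					spin_lock_irq(&send_cq->lock);
					__acquire(&recv_cq->lock);
				} else {
					spin_lock_irq(&recv_cq->lock);
					spin_lock_nested(&send_cq->lock,
							 SINGLE_DEPTH_NESTING);
				}
			} else {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			}
		} else if (recv_cq) {
			__acquire(&send_cq->lock);
			spin_lock_irq(&recv_cq->lock);
		} else {
			__acquire(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	}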
1086 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) in mlx5_ib_unlock_cqs() argument
1087 __releases(&send_cq->lock) __releases(&recv_cq->lock) in mlx5_ib_unlock_cqs()
1089 if (send_cq) { in mlx5_ib_unlock_cqs()
1091 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx5_ib_unlock_cqs()
1093 spin_unlock_irq(&send_cq->lock); in mlx5_ib_unlock_cqs()
1094 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { in mlx5_ib_unlock_cqs()
1096 spin_unlock_irq(&send_cq->lock); in mlx5_ib_unlock_cqs()
1098 spin_unlock(&send_cq->lock); in mlx5_ib_unlock_cqs()
1103 spin_unlock_irq(&send_cq->lock); in mlx5_ib_unlock_cqs()
1106 __release(&send_cq->lock); in mlx5_ib_unlock_cqs()
1110 __release(&send_cq->lock); in mlx5_ib_unlock_cqs()
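The unlock helper mirrors that ordering in reverse: the lock taken with spin_lock_nested() is dropped first with plain spin_unlock(), and the spin_lock_irq() lock is released last so interrupts are re-enabled exactly once, at the end of the critical section; __release() balances the sparse annotations for absent or shared CQs. Reconstructed the same way:

	static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
				       struct mlx5_ib_cq *recv_cq)
		__releases(&send_cq->lock) __releases(&recv_cq->lock)
	{
		if (send_cq) {
			if (recv_cq) {
				if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
					spin_unlock(&recv_cq->lock);
					spin_unlock_irq(&send_cq->lock);
				} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
					__release(&recv_cq->lock);
					spin_unlock_irq(&send_cq->lock);
				} else {
					spin_unlock(&send_cq->lock);
					spin_unlock_irq(&recv_cq->lock);
				}
			} else {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			}
		} else if (recv_cq) {
			__release(&send_cq->lock);
			spin_unlock_irq(&recv_cq->lock);
		} else {
			__release(&send_cq->lock);
			__release(&recv_cq->lock);
		}
	}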
1120 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) in get_cqs() argument
1124 *send_cq = NULL; in get_cqs()
1129 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1140 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1147 *send_cq = NULL; in get_cqs()
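get_cqs() maps the QP type onto the CQ pair the caller may touch: an XRC target owns neither CQ, an XRC initiator (and the driver-internal UMR QP) owns only a send CQ, ordinary connected/datagram QPs own both, and anything unrecognized falls back to neither. A sketch consistent with the four assignments above; the exact case labels are assumptions:

	static void get_cqs(struct mlx5_ib_qp *qp,
			    struct mlx5_ib_cq **send_cq,
			    struct mlx5_ib_cq **recv_cq)
	{
		switch (qp->ibqp.qp_type) {
		case IB_QPT_XRC_TGT:
			*send_cq = NULL;
			*recv_cq = NULL;
			break;
		case MLX5_IB_QPT_REG_UMR:	/* assumed label */
		case IB_QPT_XRC_INI:
			*send_cq = to_mcq(qp->ibqp.send_cq);
			*recv_cq = NULL;
			break;
		case IB_QPT_RC:			/* assumed: plus UC/UD/SMI/GSI */
			*send_cq = to_mcq(qp->ibqp.send_cq);
			*recv_cq = to_mcq(qp->ibqp.recv_cq);
			break;
		default:
			*send_cq = NULL;
			*recv_cq = NULL;
			break;
		}
	}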
1155 struct mlx5_ib_cq *send_cq, *recv_cq; in destroy_qp_common() local
1171 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
1174 mlx5_ib_lock_cqs(send_cq, recv_cq); in destroy_qp_common()
1177 if (send_cq != recv_cq) in destroy_qp_common()
1178 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1179 mlx5_ib_unlock_cqs(send_cq, recv_cq); in destroy_qp_common()
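destroy_qp_common() strings those helpers together: resolve the pair, lock both in CQN order, purge completions belonging to the dying QP from each distinct CQ with the already-locked __mlx5_ib_cq_clean() variant, then unlock. A condensed sketch; the receive-side clean and the SRQ handling are assumptions beyond the quoted lines:

	get_cqs(qp, &send_cq, &recv_cq);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Assumed: the recv CQ is cleaned too, with the attached SRQ if any. */
	__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
			   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
	if (send_cq != recv_cq)
		__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);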
1259 init_attr->send_cq = NULL; in mlx5_ib_create_qp()
1289 to_mcq(init_attr->send_cq)->mcq.cqn); in mlx5_ib_create_qp()
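In mlx5_ib_create_qp() the two hits bracket the XRC special case and a success trace: an XRC target QP has its CQ pointers cleared before the common path runs, and the chosen send CQN is printed afterwards. Sketch; the guard and the debug format string are assumptions beyond what the fragments show:

	if (init_attr->qp_type == IB_QPT_XRC_TGT) {
		init_attr->recv_cq = NULL;	/* assumed counterpart */
		init_attr->send_cq = NULL;
	}
	/* ... common creation path ... */
	if (init_attr->send_cq)			/* assumed guard */
		mlx5_ib_dbg(dev, "scqn 0x%x\n",	/* assumed format */
			    to_mcq(init_attr->send_cq)->mcq.cqn);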
1571 struct mlx5_ib_cq *send_cq, *recv_cq; in __mlx5_ib_modify_qp() local
1658 get_cqs(qp, &send_cq, &recv_cq); in __mlx5_ib_modify_qp()
1661 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; in __mlx5_ib_modify_qp()
1756 if (send_cq != recv_cq) in __mlx5_ib_modify_qp()
1757 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx5_ib_modify_qp()
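__mlx5_ib_modify_qp() reuses the same pair: the (possibly absent) send CQN is rewritten into the modify context, and after a successful transition stale completions for this QP are flushed, touching the send CQ only when it differs from the receive CQ. The locking mlx5_ib_cq_clean() variant (rather than __mlx5_ib_cq_clean()) fits a path that does not already hold both CQ locks. Sketch; the RESET trigger and the receive-side clean are assumptions:

	get_cqs(qp, &send_cq, &recv_cq);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	/* ... issue the modify command to firmware ... */
	if (new_state == IB_QPS_RESET) {	/* assumed trigger */
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,	/* assumed recv side */
				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}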
2542 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { in begin_wqe()
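Finally, the post-send fast path consults the send CQ when admitting new work requests: mlx5_wq_overflow() must count completions not yet reaped from qp->ibqp.send_cq before letting nreq more WQEs into qp->sq. A sketch of how the quoted check is typically wrapped; the warning text and error value are assumptions:

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		mlx5_ib_warn(dev, "send work queue overflow\n"); /* assumed */
		return -ENOMEM;				/* assumed errno */
	}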