Lines Matching refs: rq

96 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
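
A note on the addressing above: receive WQEs have a fixed stride, so entry n lives at rq.offset + n * (1 << rq.wqe_shift) inside the QP buffer. A minimal userspace sketch of that computation follows; the struct and helper are illustrative stand-ins, not the driver's definitions.

    #include <stddef.h>

    struct wq_model {
        void   *buf;        /* start of the QP buffer */
        size_t  offset;     /* byte offset of the RQ within that buffer */
        int     wqe_shift;  /* log2 of the per-WQE stride in bytes */
    };

    /* Entry n starts at offset + (n << wqe_shift), as on line 96. */
    static void *get_recv_wqe_model(const struct wq_model *rq, int n)
    {
        return (char *)rq->buf + rq->offset + ((size_t)n << rq->wqe_shift);
    }
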
126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe()
233 qp->rq.max_gs = 0; in set_rq_size()
234 qp->rq.wqe_cnt = 0; in set_rq_size()
235 qp->rq.wqe_shift = 0; in set_rq_size()
238 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
239 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
240 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
241 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
248 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
255 qp->rq.wqe_shift = ilog2(wqe_size); in set_rq_size()
256 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
257 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
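
The set_rq_size() cluster above shows two paths: user QPs (lines 238-241) take wqe_cnt and wqe_shift verbatim from the ucmd, while the kernel path (lines 248-257) derives them from the requested capacities. Below is a sketch of the kernel path, assuming the stride is rounded to a power of two and that an enabled signature (wq_sig) consumes one data-segment-sized slot, as the max_gs formula on lines 240/256 implies; all names here are illustrative, not the driver's.

    #define DATA_SEG_SIZE 16  /* stand-in for sizeof(struct mlx5_wqe_data_seg) */

    struct rq_model {
        int wqe_cnt, wqe_shift, max_gs, max_post;
    };

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int p = 1;
        while (p < v)
            p <<= 1;
        return p;
    }

    /* Kernel-path sizing; assumes max_wr >= 1 and max_sge >= 1. */
    static void set_rq_size_model(struct rq_model *rq, int max_wr,
                                  int max_sge, int wq_sig)
    {
        unsigned int wqe_size = roundup_pow2((max_sge + wq_sig) * DATA_SEG_SIZE);
        unsigned int wq_size  = roundup_pow2(max_wr) * wqe_size;

        rq->wqe_cnt   = wq_size / wqe_size;
        rq->wqe_shift = __builtin_ctz(wqe_size);  /* ilog2 of a power of two */
        rq->max_gs    = (1 << rq->wqe_shift) / DATA_SEG_SIZE - wq_sig;
        rq->max_post  = rq->wqe_cnt;
    }
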
402 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_buf_size()
645 qp->rq.offset = 0; in create_user_qp()
647 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_user_qp()
767 qp->rq.offset = 0; in create_kernel_qp()
768 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_kernel_qp()
769 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); in create_kernel_qp()
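
Lines 645-647 and 767-769 fix the buffer layout: the RQ sits at offset 0 and the SQ starts immediately after it, so buf_size is the RQ footprint plus the SQ size (the `err` on line 769 is the send-queue size in bytes returned by the preceding calc_sq_size() call on success). A hedged sketch of that layout, with illustrative types:

    #include <stdint.h>

    struct qp_layout {
        uint32_t rq_offset, sq_offset, buf_size;
    };

    static void lay_out_qp(struct qp_layout *l, uint32_t rq_wqe_cnt,
                           int rq_wqe_shift, uint32_t sq_size)
    {
        uint32_t rq_size = rq_wqe_cnt << rq_wqe_shift;

        l->rq_offset = 0;                /* RQ at the front of the buffer */
        l->sq_offset = rq_size;          /* SQ begins where the RQ ends */
        l->buf_size  = rq_size + sq_size;
    }
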
801 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); in create_kernel_qp()
805 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || in create_kernel_qp()
820 kfree(qp->rq.wrid); in create_kernel_qp()
840 kfree(qp->rq.wrid); in destroy_qp_kernel()
881 spin_lock_init(&qp->rq.lock); in create_qp_common()
918 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || in create_qp_common()
919 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { in create_qp_common()
986 if (qp->rq.wqe_cnt) { in create_qp_common()
987 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); in create_qp_common()
988 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; in create_qp_common()
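
Lines 987-988 pack the RQ geometry into a single byte of the hardware QP context: the low three bits hold log2(stride) - 4 (i.e. the stride in 16-byte units) and log2(wqe_cnt) is shifted in above them. A small sketch of that encoding, names illustrative:

    #include <stdint.h>

    static uint8_t pack_rq_size_stride(int wqe_shift, int wqe_cnt)
    {
        /* Field is one byte, so both logs are bounded by the hardware. */
        uint8_t v = (uint8_t)(wqe_shift - 4);              /* 16B stride units */
        v |= (uint8_t)(__builtin_ctz((unsigned)wqe_cnt) << 3); /* log2(count) */
        return v;
    }
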
1698 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx5_ib_modify_qp()
1759 qp->rq.head = 0; in __mlx5_ib_modify_qp()
1760 qp->rq.tail = 0; in __mlx5_ib_modify_qp()
2907 spin_lock_irqsave(&qp->rq.lock, flags); in mlx5_ib_post_recv()
2909 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
2912 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx5_ib_post_recv()
2918 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
2931 if (i < qp->rq.max_gs) { in mlx5_ib_post_recv()
2939 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); in mlx5_ib_post_recv()
2942 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
2944 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
2949 qp->rq.head += nreq; in mlx5_ib_post_recv()
2956 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx5_ib_post_recv()
2959 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx5_ib_post_recv()
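
The mlx5_ib_post_recv() lines (2907-2959) show the classic producer-ring pattern: compute the slot as head & (wqe_cnt - 1), refuse to overflow, stash the wr_id per slot, advance with the same mask, then bump head and publish its low 16 bits through the doorbell record (byte-swapped to big-endian on line 2956). A condensed userspace model follows; locking, the CQ-aware overflow check, scatter-list construction, and the endian conversion are deliberately elided, and every name is illustrative.

    #include <stdint.h>

    struct rq_ring {
        uint32_t  head, tail, wqe_cnt;   /* wqe_cnt is a power of two */
        uint64_t *wrid;                  /* one completion cookie per slot */
        volatile uint32_t *db;           /* doorbell record */
    };

    static int post_recv_model(struct rq_ring *rq, const uint64_t *wr_ids,
                               int nreq)
    {
        uint32_t ind = rq->head & (rq->wqe_cnt - 1);
        int i;

        for (i = 0; i < nreq; i++) {
            if (rq->head - rq->tail + i >= rq->wqe_cnt)
                return -1;               /* ring full */
            rq->wrid[ind] = wr_ids[i];   /* remembered for completion */
            ind = (ind + 1) & (rq->wqe_cnt - 1);
        }

        rq->head += nreq;
        *rq->db = rq->head & 0xffff;     /* hardware reads only 16 bits */
        return 0;
    }
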
3100 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx5_ib_query_qp()
3101 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx5_ib_query_qp()