Lines matching refs: qp

82 	struct mlx4_ib_qp	qp;  member
123 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
126 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
136 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
161 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
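The is_sqp()/is_qp0() excerpts above classify a QP purely by where its QP number falls relative to the device's base special QPN: the four native special QPs (QP0 and QP1 for each of two ports) sit at base_sqpn .. base_sqpn + 3, with QP0 occupying the first two slots. A minimal userspace sketch of that range test follows; the base value is an illustrative assumption, not the driver's actual phys_caps field.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for dev->dev->phys_caps.base_sqpn and the QPN under test. */
static bool is_special_qp(uint32_t qpn, uint32_t base_sqpn)
{
	/* QP0/QP1 for both ports occupy base_sqpn .. base_sqpn + 3. */
	return qpn >= base_sqpn && qpn <= base_sqpn + 3;
}

static bool is_qp0_like(uint32_t qpn, uint32_t base_sqpn)
{
	/* QP0 for each port occupies the first two slots of that window. */
	return qpn >= base_sqpn && qpn <= base_sqpn + 1;
}

int main(void)
{
	uint32_t base = 0x40;	/* illustrative base special QPN */

	for (uint32_t qpn = base; qpn < base + 4; qpn++)
		printf("qpn 0x%x: sqp=%d qp0=%d\n", (unsigned)qpn,
		       is_special_qp(qpn, base), is_qp0_like(qpn, base));
	return 0;
}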
184 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
186 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
189 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
191 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
194 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
196 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
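get_recv_wqe() and get_send_wqe() locate a WQE by adding the queue's base offset to the index shifted by the per-queue stride (wqe_shift is log2 of the WQE size). A self-contained sketch of that arithmetic, with a plain byte buffer standing in for qp->buf and illustrative offsets and strides:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct wq {
	uint32_t offset;	/* byte offset of this work queue inside the QP buffer */
	uint32_t wqe_shift;	/* log2 of the WQE stride in bytes */
};

static void *get_wqe(uint8_t *buf, const struct wq *wq, int n)
{
	/* WQE n starts at offset + n * (1 << wqe_shift). */
	return buf + wq->offset + ((size_t)n << wq->wqe_shift);
}

int main(void)
{
	uint8_t *buf = calloc(1, 4096);
	struct wq rq = { .offset = 0,    .wqe_shift = 5 };	/* 32-byte receive WQEs */
	struct wq sq = { .offset = 1024, .wqe_shift = 6 };	/* 64-byte send WQEs */

	if (!buf)
		return 1;
	printf("recv wqe 3 at byte %td\n", (uint8_t *)get_wqe(buf, &rq, 3) - buf);
	printf("send wqe 3 at byte %td\n", (uint8_t *)get_wqe(buf, &sq, 3) - buf);
	free(buf);
	return 0;
}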
208 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) in stamp_send_wqe() argument
218 if (qp->sq_max_wqes_per_wr > 1) { in stamp_send_wqe()
219 s = roundup(size, 1U << qp->sq.wqe_shift); in stamp_send_wqe()
221 ind = (i >> qp->sq.wqe_shift) + n; in stamp_send_wqe()
222 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : in stamp_send_wqe()
224 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
225 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe()
229 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
238 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) in post_nop_wqe() argument
245 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe()
248 if (qp->ibqp.qp_type == IB_QPT_UD) { in post_nop_wqe()
252 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); in post_nop_wqe()
270 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in post_nop_wqe()
272 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); in post_nop_wqe()
276 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) in pad_wraparound() argument
278 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); in pad_wraparound()
279 if (unlikely(s < qp->sq_max_wqes_per_wr)) { in pad_wraparound()
280 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); in pad_wraparound()
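pad_wraparound() checks how many WQE slots remain before the send ring wraps; if a maximum-sized work request would not fit contiguously, it burns the remainder with a NOP WQE. A sketch of the index math only (the NOP posting itself is elided), assuming the not-shown tail of the function simply advances the index past the pad:

#include <stdio.h>

/* Illustrative ring parameters; wqe_cnt must be a power of two. */
#define WQE_CNT         16u
#define MAX_WQES_PER_WR  4u

static unsigned pad_wraparound(unsigned ind)
{
	/* Slots left until the next wrap point. */
	unsigned s = WQE_CNT - (ind & (WQE_CNT - 1));

	if (s < MAX_WQES_PER_WR) {
		/* The real driver posts a NOP WQE of s << wqe_shift bytes here. */
		printf("  padding %u slot(s) with a NOP at index %u\n", s, ind);
		ind += s;	/* assumption: skip ahead to the start of the ring */
	}
	return ind;
}

int main(void)
{
	/* Index 14 leaves only 2 slots before the wrap, so it gets padded. */
	printf("ind 5  -> %u\n", pad_wraparound(5));
	printf("ind 14 -> %u\n", pad_wraparound(14));
	return 0;
}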
286 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
289 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
292 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
296 event.element.qp = ibqp; in mlx4_ib_qp_event()
324 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
379 int is_user, int has_rq, struct mlx4_ib_qp *qp) in set_rq_size() argument
390 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
396 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
397 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
398 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size()
403 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
404 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
406 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
407 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
408 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
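set_rq_size() rounds the requested receive depth and SGE count up to powers of two and derives the WQE stride from the scatter-list size; sizeof(struct mlx4_wqe_data_seg) is 16 bytes (byte_count, lkey, 64-bit address). A standalone sketch of that sizing, with simplified userspace stand-ins for roundup_pow_of_two() and ilog2():

#include <stdio.h>

#define MLX4_DATA_SEG_SIZE 16u	/* sizeof(struct mlx4_wqe_data_seg) */

static unsigned roundup_pow_of_two(unsigned v)
{
	unsigned r = 1;
	while (r < v)
		r <<= 1;
	return r;
}

static unsigned ilog2_u32(unsigned v)
{
	unsigned l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned max_recv_wr = 100, max_recv_sge = 3;	/* requested caps */

	unsigned wqe_cnt   = roundup_pow_of_two(max_recv_wr  ? max_recv_wr  : 1);
	unsigned max_gs    = roundup_pow_of_two(max_recv_sge ? max_recv_sge : 1);
	unsigned wqe_shift = ilog2_u32(max_gs * MLX4_DATA_SEG_SIZE);

	printf("rq.wqe_cnt=%u rq.max_gs=%u rq.wqe_shift=%u (stride %u bytes)\n",
	       wqe_cnt, max_gs, wqe_shift, 1u << wqe_shift);
	return 0;
}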
417 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
424 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
439 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
476 qp->sq_signal_bits && BITS_PER_LONG == 64 && in set_kernel_sq_size()
480 qp->sq.wqe_shift = ilog2(64); in set_kernel_sq_size()
482 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
485 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); in set_kernel_sq_size()
491 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
492 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * in set_kernel_sq_size()
493 qp->sq_max_wqes_per_wr + in set_kernel_sq_size()
494 qp->sq_spare_wqes); in set_kernel_sq_size()
496 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) in set_kernel_sq_size()
499 if (qp->sq_max_wqes_per_wr <= 1) in set_kernel_sq_size()
502 ++qp->sq.wqe_shift; in set_kernel_sq_size()
505 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, in set_kernel_sq_size()
506 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - in set_kernel_sq_size()
507 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
510 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
511 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
512 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
513 qp->rq.offset = 0; in set_kernel_sq_size()
514 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
516 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
517 qp->sq.offset = 0; in set_kernel_sq_size()
520 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
521 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
522 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
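set_kernel_sq_size() iterates: it picks a WQE stride, works out how many basic blocks a maximum-sized WR needs, reserves spare WQEs for the stamping/wraparound machinery (2048 >> wqe_shift plus one WR's worth), and retries with a larger stride if the resulting ring would exceed the device limit. A compressed sketch of that loop, with an illustrative per-WR descriptor size and device cap:

#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned v)
{
	unsigned r = 1;
	while (r < v)
		r <<= 1;
	return r;
}

static unsigned ilog2_u32(unsigned v)
{
	unsigned l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned s = 200;		/* illustrative bytes needed per send WR */
	unsigned max_send_wr = 1000;	/* requested SQ depth */
	unsigned dev_max_wqes = 16384;	/* illustrative device cap */

	unsigned wqe_shift = ilog2_u32(roundup_pow_of_two(s));
	unsigned wqes_per_wr, spare, wqe_cnt;

	for (;;) {
		wqes_per_wr = (s + (1u << wqe_shift) - 1) >> wqe_shift;	/* DIV_ROUND_UP */
		spare       = (2048u >> wqe_shift) + wqes_per_wr;
		wqe_cnt     = roundup_pow_of_two(max_send_wr * wqes_per_wr + spare);

		if (wqe_cnt <= dev_max_wqes)
			break;
		if (wqes_per_wr <= 1)
			return 1;	/* cannot shrink further: the driver fails with -EINVAL */
		++wqe_shift;		/* try a larger basic block */
	}

	printf("wqe_shift=%u wqes_per_wr=%u spare=%u wqe_cnt=%u max_post=%u\n",
	       wqe_shift, wqes_per_wr, spare, wqe_cnt,
	       (wqe_cnt - spare) / wqes_per_wr);
	return 0;
}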
532 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
542 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
543 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
545 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
546 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
551 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
555 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
556 kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, in alloc_proxy_bufs()
558 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
560 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
561 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
564 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
566 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
567 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
570 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
571 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
580 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
583 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
585 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
586 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
590 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
594 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
595 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
598 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
600 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
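alloc_proxy_bufs()/free_proxy_bufs() above allocate one receive buffer per RQ WQE, DMA-map each one, and unwind everything already set up if any step fails. A userspace analogue of that error-unwind pattern (plain malloc stands in for kmalloc plus ib_dma_map_single; this is not the driver's code):

#include <stdio.h>
#include <stdlib.h>

/* Allocate 'cnt' fixed-size buffers, undoing earlier allocations on failure. */
static void **alloc_bufs(int cnt, size_t size)
{
	void **bufs = calloc(cnt, sizeof(*bufs));
	if (!bufs)
		return NULL;

	for (int i = 0; i < cnt; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			/* Unwind: free everything allocated before the failure. */
			while (--i >= 0)
				free(bufs[i]);
			free(bufs);
			return NULL;
		}
	}
	return bufs;
}

static void free_bufs(void **bufs, int cnt)
{
	for (int i = 0; i < cnt; i++)
		free(bufs[i]);
	free(bufs);
}

int main(void)
{
	void **rcv = alloc_bufs(8, 512);
	if (!rcv)
		return 1;
	puts("allocated 8 proxy-style receive buffers");
	free_bufs(rcv, 8);
	return 0;
}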
622 struct mlx4_ib_qp *qp) in mlx4_ib_free_qp_counter() argument
624 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
625 mlx4_counter_free(dev->dev, qp->counter_index->index); in mlx4_ib_free_qp_counter()
626 list_del(&qp->counter_index->list); in mlx4_ib_free_qp_counter()
627 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
629 kfree(qp->counter_index); in mlx4_ib_free_qp_counter()
630 qp->counter_index = NULL; in mlx4_ib_free_qp_counter()
641 struct mlx4_ib_qp *qp; in create_qp_common() local
694 qp = &sqp->qp; in create_qp_common()
695 qp->pri.vid = 0xFFFF; in create_qp_common()
696 qp->alt.vid = 0xFFFF; in create_qp_common()
698 qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); in create_qp_common()
699 if (!qp) in create_qp_common()
701 qp->pri.vid = 0xFFFF; in create_qp_common()
702 qp->alt.vid = 0xFFFF; in create_qp_common()
705 qp = *caller_qp; in create_qp_common()
707 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
709 mutex_init(&qp->mutex); in create_qp_common()
710 spin_lock_init(&qp->sq.lock); in create_qp_common()
711 spin_lock_init(&qp->rq.lock); in create_qp_common()
712 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
713 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
715 qp->state = IB_QPS_RESET; in create_qp_common()
717 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
719 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); in create_qp_common()
731 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
733 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
737 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common()
738 qp->buf_size, 0, 0); in create_qp_common()
739 if (IS_ERR(qp->umem)) { in create_qp_common()
740 err = PTR_ERR(qp->umem); in create_qp_common()
744 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), in create_qp_common()
745 ilog2(qp->umem->page_size), &qp->mtt); in create_qp_common()
749 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
755 ucmd.db_addr, &qp->db); in create_qp_common()
760 qp->sq_no_prefetch = 0; in create_qp_common()
763 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
768 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
773 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
778 err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); in create_qp_common()
782 *qp->db.db = 0; in create_qp_common()
785 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { in create_qp_common()
790 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
791 &qp->mtt); in create_qp_common()
795 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); in create_qp_common()
799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp); in create_qp_common()
800 if (!qp->sq.wrid) in create_qp_common()
801 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), in create_qp_common()
803 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp); in create_qp_common()
804 if (!qp->rq.wrid) in create_qp_common()
805 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), in create_qp_common()
807 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
814 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
816 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
832 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
842 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
844 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); in create_qp_common()
849 qp->mqp.qpn |= (1 << 23); in create_qp_common()
856 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
858 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
860 *caller_qp = qp; in create_qp_common()
868 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
873 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
875 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
883 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
889 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
890 free_proxy_bufs(pd->device, qp); in create_qp_common()
894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); in create_qp_common()
896 kvfree(qp->sq.wrid); in create_qp_common()
897 kvfree(qp->rq.wrid); in create_qp_common()
901 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
905 ib_umem_release(qp->umem); in create_qp_common()
907 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
911 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
915 kfree(qp); in create_qp_common()
963 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
967 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
973 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
975 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
976 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
978 return to_mpd(qp->ibqp.pd); in get_pd()
981 static void get_cqs(struct mlx4_ib_qp *qp, in get_cqs() argument
984 switch (qp->ibqp.qp_type) { in get_cqs()
986 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
990 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
994 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
995 *recv_cq = to_mcq(qp->ibqp.recv_cq); in get_cqs()
1000 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
1006 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1007 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
1008 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1010 qp->mqp.qpn); in destroy_qp_common()
1011 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
1012 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
1013 qp->pri.smac = 0; in destroy_qp_common()
1014 qp->pri.smac_port = 0; in destroy_qp_common()
1016 if (qp->alt.smac) { in destroy_qp_common()
1017 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
1018 qp->alt.smac = 0; in destroy_qp_common()
1020 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1021 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1022 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1023 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1024 qp->pri.update_vid = 0; in destroy_qp_common()
1026 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1027 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1028 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1029 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1030 qp->alt.update_vid = 0; in destroy_qp_common()
1034 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
1040 list_del(&qp->qps_list); in destroy_qp_common()
1041 list_del(&qp->cq_send_list); in destroy_qp_common()
1042 list_del(&qp->cq_recv_list); in destroy_qp_common()
1044 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1045 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1047 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1050 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1055 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1057 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1058 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1059 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1061 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1064 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1067 if (qp->rq.wqe_cnt) in destroy_qp_common()
1068 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), in destroy_qp_common()
1069 &qp->db); in destroy_qp_common()
1070 ib_umem_release(qp->umem); in destroy_qp_common()
1072 kvfree(qp->sq.wrid); in destroy_qp_common()
1073 kvfree(qp->rq.wrid); in destroy_qp_common()
1074 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1076 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1077 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1078 if (qp->rq.wqe_cnt) in destroy_qp_common()
1079 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1082 del_gid_entries(qp); in destroy_qp_common()
1106 struct mlx4_ib_qp *qp = NULL; in mlx4_ib_create_qp() local
1155 qp = kzalloc(sizeof *qp, gfp); in mlx4_ib_create_qp()
1156 if (!qp) in mlx4_ib_create_qp()
1158 qp->pri.vid = 0xFFFF; in mlx4_ib_create_qp()
1159 qp->alt.vid = 0xFFFF; in mlx4_ib_create_qp()
1164 udata, 0, &qp, gfp); in mlx4_ib_create_qp()
1168 qp->ibqp.qp_num = qp->mqp.qpn; in mlx4_ib_create_qp()
1169 qp->xrcdn = xrcdn; in mlx4_ib_create_qp()
1182 &qp, gfp); in mlx4_ib_create_qp()
1186 qp->port = init_attr->port_num; in mlx4_ib_create_qp()
1187 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; in mlx4_ib_create_qp()
1196 return &qp->ibqp; in mlx4_ib_create_qp()
1199 int mlx4_ib_destroy_qp(struct ib_qp *qp) in mlx4_ib_destroy_qp() argument
1201 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_destroy_qp()
1202 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1252 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1262 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1267 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1416 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1422 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1429 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1434 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1440 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1444 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1445 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1447 ge->port = qp->port; in update_mcg_macs()
1453 struct mlx4_ib_qp *qp, in handle_eth_ud_smac_index() argument
1459 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1461 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1462 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1463 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1465 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1466 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1467 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1476 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in create_qp_lb_counter() argument
1482 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != in create_qp_lb_counter()
1484 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || in create_qp_lb_counter()
1500 qp->counter_index = new_counter_index; in create_qp_lb_counter()
1502 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1504 &dev->counters_table[qp->port - 1].counters_list); in create_qp_lb_counter()
1505 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1515 struct mlx4_ib_qp *qp = to_mqp(ibqp); in __mlx4_ib_modify_qp() local
1527 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1536 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
1560 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
1575 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1576 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1577 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1579 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
1580 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1581 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1583 if (new_state == IB_QPS_RESET && qp->counter_index) in __mlx4_ib_modify_qp()
1584 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
1587 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
1588 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
1593 if (qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1610 err = create_qp_lb_counter(dev, qp); in __mlx4_ib_modify_qp()
1615 dev->counters_table[qp->port - 1].default_counter; in __mlx4_ib_modify_qp()
1616 if (qp->counter_index) in __mlx4_ib_modify_qp()
1617 counter_index = qp->counter_index->index; in __mlx4_ib_modify_qp()
1622 if (qp->counter_index) { in __mlx4_ib_modify_qp()
1632 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
1633 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
1639 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1647 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in __mlx4_ib_modify_qp()
1671 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
1693 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
1703 pd = get_pd(qp); in __mlx4_ib_modify_qp()
1704 get_cqs(qp, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
1711 if (!qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1742 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
1758 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1763 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
1780 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx4_ib_modify_qp()
1781 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
1788 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
1789 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
1790 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1793 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
1796 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1800 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1802 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
1803 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
1806 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
1807 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
1808 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
1809 err = handle_eth_ud_smac_index(dev, qp, context); in __mlx4_ib_modify_qp()
1814 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
1815 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
1820 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { in __mlx4_ib_modify_qp()
1832 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1860 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
1861 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
1863 if (qp->sq_max_wqes_per_wr == 1) in __mlx4_ib_modify_qp()
1864 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
1866 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); in __mlx4_ib_modify_qp()
1870 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
1872 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
1876 qp->state = new_state; in __mlx4_ib_modify_qp()
1879 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
1881 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
1883 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
1884 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
1887 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
1889 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
1890 store_sqp_attrs(to_msqp(qp), attr, attr_mask); in __mlx4_ib_modify_qp()
1896 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
1898 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
1900 qp->port); in __mlx4_ib_modify_qp()
1904 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
1913 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
1916 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
1918 qp->rq.head = 0; in __mlx4_ib_modify_qp()
1919 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
1920 qp->sq.head = 0; in __mlx4_ib_modify_qp()
1921 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
1922 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
1923 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1924 *qp->db.db = 0; in __mlx4_ib_modify_qp()
1926 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
1927 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
1929 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
1930 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
1931 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
1932 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
1934 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
1935 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
1936 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
1938 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
1939 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
1940 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
1941 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1942 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
1945 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
1946 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
1947 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
1948 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1949 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
1953 if (err && qp->counter_index) in __mlx4_ib_modify_qp()
1954 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
1956 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
1958 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
1959 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
1961 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
1963 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
1964 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
1965 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
1966 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
1967 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
1969 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
1970 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
1971 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
1973 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
1975 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
1977 if (qp->alt.smac) in __mlx4_ib_modify_qp()
1978 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
1979 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
1980 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
1981 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
1983 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
1984 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
1985 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
1988 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
1990 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
1991 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
1992 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
1994 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
1995 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
1996 qp->pri.vid); in __mlx4_ib_modify_qp()
1997 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
1998 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
1999 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
2001 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2002 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2005 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
2007 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2008 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
2009 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
2011 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
2012 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
2013 qp->alt.vid); in __mlx4_ib_modify_qp()
2014 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
2015 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
2016 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
2018 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2019 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2029 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_modify_qp() local
2033 mutex_lock(&qp->mutex); in mlx4_ib_modify_qp()
2035 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx4_ib_modify_qp()
2041 int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
2086 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
2125 mutex_unlock(&qp->mutex); in mlx4_ib_modify_qp()
2146 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); in build_sriov_qp0_header()
2168 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2173 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2190 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); in build_sriov_qp0_header()
2192 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2196 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); in build_sriov_qp0_header()
2200 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2203 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2207 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
2268 struct ib_device *ib_dev = sqp->qp.ibqp.device; in build_mlx_header()
2289 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
2340 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2343 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2358 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
2395 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); in build_mlx_header()
2414 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
2419 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
2420 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
2422 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); in build_mlx_header()
2428 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
2707 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, in build_lso_seg() argument
2715 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
2716 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
2751 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_send() local
2769 spin_lock_irqsave(&qp->sq.lock, flags); in mlx4_ib_post_send()
2777 ind = qp->sq_next_wqe; in mlx4_ib_post_send()
2783 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
2789 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
2795 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in mlx4_ib_post_send()
2796 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
2806 qp->sq_signal_bits; in mlx4_ib_post_send()
2813 switch (qp->mlx4_ib_qp_type) { in mlx4_ib_post_send()
2884 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), in mlx4_ib_post_send()
2908 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in mlx4_ib_post_send()
2921 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), in mlx4_ib_post_send()
2945 qp->mlx4_ib_qp_type); in mlx4_ib_post_send()
2955 err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl, in mlx4_ib_post_send()
2981 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in mlx4_ib_post_send()
2982 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in mlx4_ib_post_send()
2983 qp->mlx4_ib_qp_type & in mlx4_ib_post_send()
3017 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in mlx4_ib_post_send()
3019 stamp = ind + qp->sq_spare_wqes; in mlx4_ib_post_send()
3020 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); in mlx4_ib_post_send()
3032 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
3033 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
3039 qp->sq.head += nreq; in mlx4_ib_post_send()
3047 writel(qp->doorbell_qpn, in mlx4_ib_post_send()
3056 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
3058 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
3059 qp->sq_next_wqe = ind; in mlx4_ib_post_send()
3062 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx4_ib_post_send()
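In mlx4_ib_post_send() the ownership bit written into each control segment comes from (ind & sq.wqe_cnt): because wqe_cnt is a power of two, that bit flips every time the producer index completes a full pass over the ring, so a descriptor from the previous pass can be told apart from a freshly posted one. A small demonstration of that toggling, with an illustrative ring size:

#include <stdio.h>

#define SQ_WQE_CNT 8u	/* power-of-two send queue size */

int main(void)
{
	/* The producer index keeps growing; only the low bits pick the slot. */
	for (unsigned ind = 0; ind < 3 * SQ_WQE_CNT; ind++) {
		unsigned slot  = ind & (SQ_WQE_CNT - 1);
		unsigned owner = (ind & SQ_WQE_CNT) ? 1 : 0;	/* bit 31 in the real ctrl segment */

		if (slot == 0)
			printf("--- pass %u over the ring ---\n", ind / SQ_WQE_CNT);
		printf("ind=%2u slot=%u owner=%u\n", ind, slot, owner);
	}
	return 0;
}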
3070 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_recv() local
3080 max_gs = qp->rq.max_gs; in mlx4_ib_post_recv()
3081 spin_lock_irqsave(&qp->rq.lock, flags); in mlx4_ib_post_recv()
3090 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
3093 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx4_ib_post_recv()
3099 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
3105 scat = get_recv_wqe(qp, ind); in mlx4_ib_post_recv()
3107 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in mlx4_ib_post_recv()
3110 qp->sqp_proxy_rcv[ind].map, in mlx4_ib_post_recv()
3117 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in mlx4_ib_post_recv()
3131 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
3133 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
3138 qp->rq.head += nreq; in mlx4_ib_post_recv()
3146 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx4_ib_post_recv()
3149 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx4_ib_post_recv()
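mlx4_ib_post_recv() masks the monotonically increasing rq.head into a ring slot, records the caller's wr_id per slot, and finally rings the doorbell with the low 16 bits of the new head. A sketch of that producer-side bookkeeping, with a plain variable simulating the doorbell record write:

#include <stdio.h>

#define RQ_WQE_CNT 8u	/* power-of-two receive queue size */

int main(void)
{
	unsigned head = 0;			/* monotonically increasing producer count */
	unsigned long long wrid[RQ_WQE_CNT];
	unsigned doorbell;			/* stands in for *qp->db.db */

	/* Post a batch of 5 receive work requests. */
	unsigned nreq = 5;
	unsigned ind = head & (RQ_WQE_CNT - 1);

	for (unsigned i = 0; i < nreq; i++) {
		wrid[ind] = 0x1000 + i;		/* illustrative wr_id */
		ind = (ind + 1) & (RQ_WQE_CNT - 1);
	}

	head += nreq;
	doorbell = head & 0xffff;		/* real driver: cpu_to_be32() into the DB page */

	printf("head=%u next slot=%u doorbell=0x%x first wr_id=0x%llx\n",
	       head, ind, doorbell, wrid[0]);
	return 0;
}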
3233 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
3238 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
3240 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
3245 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
3253 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
3254 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
3265 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx4_ib_query_qp()
3274 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
3294 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
3295 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
3298 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
3299 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
3314 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
3317 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
3320 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
3324 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
3328 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()