/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_mcast.c |
  59  int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in ehca_attach_mcast() argument
  61  struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);  in ehca_attach_mcast()
  62  struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,  in ehca_attach_mcast()
  67  if (ibqp->qp_type != IB_QPT_UD) {  in ehca_attach_mcast()
  68  ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);  in ehca_attach_mcast()
  73  ehca_err(ibqp->device, "invalid multicast gid");  in ehca_attach_mcast()
  76  ehca_err(ibqp->device, "invalid multicast lid=%x", lid);  in ehca_attach_mcast()
  89  ehca_err(ibqp->device,  in ehca_attach_mcast()
  91  "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);  in ehca_attach_mcast()
  96  int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in ehca_detach_mcast() argument
  [all …]
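The checks at lines 67-76 are the standard argument validation for a multicast attach verb: only UD QPs may join a group, the GID must be a multicast GID, and the LID must sit in the multicast range. Below is a minimal user-space sketch of that validation order, assuming stand-in types; the structs, the 0xff GID prefix test, and the LID bounds are local simplifications, not the kernel's definitions (the real code tests against IB_MULTICAST_LID_BASE).

#include <stdint.h>
#include <stdio.h>

/* Stand-in types; the kernel's struct ib_qp and union ib_gid are richer. */
enum ib_qp_type { IB_QPT_RC, IB_QPT_UC, IB_QPT_UD };
struct ib_qp { enum ib_qp_type qp_type; };
union ib_gid { uint8_t raw[16]; };

static int attach_mcast_checks(struct ib_qp *ibqp, union ib_gid *gid, uint16_t lid)
{
	if (ibqp->qp_type != IB_QPT_UD)
		return -1;              /* only UD QPs can join multicast groups */
	if (gid->raw[0] != 0xff)
		return -2;              /* IB multicast GIDs begin with 0xff */
	if (lid < 0xc000 || lid > 0xfffe)
		return -3;              /* outside the multicast LID range */
	return 0;                       /* arguments look sane; go talk to hw */
}

int main(void)
{
	struct ib_qp qp = { .qp_type = IB_QPT_UD };
	union ib_gid gid = { .raw = { 0xff } };
	printf("%d\n", attach_mcast_checks(&qp, &gid, 0xc001));  /* prints 0 */
	return 0;
}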
|
D | ehca_qp.c |
  1237  static int internal_modify_qp(struct ib_qp *ibqp,  in internal_modify_qp() argument
  1245  struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);  in internal_modify_qp()
  1247  container_of(ibqp->pd->device, struct ehca_shca, ib_device);  in internal_modify_qp()
  1258  ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "  in internal_modify_qp()
  1259  "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);  in internal_modify_qp()
  1268  ehca_err(ibqp->device, "hipz_h_query_qp() failed "  in internal_modify_qp()
  1270  my_qp, ibqp->qp_num, h_ret);  in internal_modify_qp()
  1274  if (ibqp->uobject)  in internal_modify_qp()
  1281  ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "  in internal_modify_qp()
  1283  mqpcb->qp_state, my_qp, ibqp->qp_num);  in internal_modify_qp()
  [all …]
|
D | ehca_iverbs.h |
  149  int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
  176  u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
|
D | ehca_main.c |
  535  struct ib_qp *ibqp;  in ehca_create_aqp1() local
  571  ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);  in ehca_create_aqp1()
  572  if (IS_ERR(ibqp)) {  in ehca_create_aqp1()
  574  ret = PTR_ERR(ibqp);  in ehca_create_aqp1()
  577  sport->ibqp_sqp[IB_QPT_GSI] = ibqp;  in ehca_create_aqp1()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_ud.c |
  51  struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);  in qib_ud_loopback()
  68  sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?  in qib_ud_loopback()
  69  IB_QPT_UD : sqp->ibqp.qp_type;  in qib_ud_loopback()
  70  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?  in qib_ud_loopback()
  71  IB_QPT_UD : qp->ibqp.qp_type;  in qib_ud_loopback()
  82  if (qp->ibqp.qp_num > 1) {  in qib_ud_loopback()
  94  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in qib_ud_loopback()
  106  if (qp->ibqp.qp_num) {  in qib_ud_loopback()
  118  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in qib_ud_loopback()
  154  if (qp->ibqp.qp_num == 0)  in qib_ud_loopback()
  [all …]
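Lines 68-71 fold IB_QPT_GSI down to IB_QPT_UD before comparing the source and destination QP types, since QP1 traffic is ordinary UD on the wire. A self-contained sketch of that normalization; the enum values are local stand-ins, not the kernel's:

#include <assert.h>

enum ib_qp_type { IB_QPT_SMI, IB_QPT_GSI, IB_QPT_RC, IB_QPT_UC, IB_QPT_UD };

/* GSI QPs carry UD traffic, so loopback type checks compare folded types. */
static enum ib_qp_type fold_gsi_to_ud(enum ib_qp_type t)
{
	return t == IB_QPT_GSI ? IB_QPT_UD : t;
}

int main(void)
{
	/* A GSI sender and a UD receiver are considered compatible... */
	assert(fold_gsi_to_ud(IB_QPT_GSI) == fold_gsi_to_ud(IB_QPT_UD));
	/* ...but an RC QP is not. */
	assert(fold_gsi_to_ud(IB_QPT_RC) != fold_gsi_to_ud(IB_QPT_UD));
	return 0;
}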
|
D | qib_ruc.c |
  89  rkt = &to_idev(qp->ibqp.device)->lk_table;  in qib_init_sge()
  90  pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);  in qib_init_sge()
  120  wc.qp = &qp->ibqp;  in qib_init_sge()
  122  qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);  in qib_init_sge()
  149  if (qp->ibqp.srq) {  in qib_get_rwqe()
  150  srq = to_isrq(qp->ibqp.srq);  in qib_get_rwqe()
  212  ev.device = qp->ibqp.device;  in qib_get_rwqe()
  213  ev.element.srq = qp->ibqp.srq;  in qib_get_rwqe()
  238  ev.device = qp->ibqp.device;  in qib_migrate_qp()
  239  ev.element.qp = &qp->ibqp;  in qib_migrate_qp()
  [all …]
|
D | qib_qp.c |
  225  struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in insert_qp()
  227  unsigned n = qpn_hash(dev, qp->ibqp.qp_num);  in insert_qp()
  232  if (qp->ibqp.qp_num == 0)  in insert_qp()
  234  else if (qp->ibqp.qp_num == 1)  in insert_qp()
  250  struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in remove_qp()
  251  unsigned n = qpn_hash(dev, qp->ibqp.qp_num);  in remove_qp()
  357  if (qp->ibqp.qp_num == qpn) {  in qib_lookup_qpn()
  437  if (qp->ibqp.qp_type == IB_QPT_UD ||  in clear_mr_refs()
  438  qp->ibqp.qp_type == IB_QPT_SMI ||  in clear_mr_refs()
  439  qp->ibqp.qp_type == IB_QPT_GSI)  in clear_mr_refs()
  [all …]
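insert_qp() gives QP0 and QP1 dedicated per-port slots and hashes every other QPN into a bucket via qpn_hash(). A compressed sketch of that lookup shape, assuming a trivial modulo in place of the real hash and stand-in structs:

#include <stdio.h>

#define NBUCKETS 16

struct qp { unsigned qp_num; struct qp *next; };

struct port {
	struct qp *qp0, *qp1;           /* SMI and GSI get fixed slots */
	struct qp *buckets[NBUCKETS];   /* all other QPNs are hashed */
};

static struct qp *lookup_qpn(struct port *p, unsigned qpn)
{
	if (qpn == 0)
		return p->qp0;
	if (qpn == 1)
		return p->qp1;
	for (struct qp *q = p->buckets[qpn % NBUCKETS]; q; q = q->next)
		if (q->qp_num == qpn)
			return q;
	return NULL;
}

int main(void)
{
	struct port port = { 0 };
	struct qp q = { .qp_num = 42 };
	port.buckets[42 % NBUCKETS] = &q;
	printf("%u\n", lookup_qpn(&port, 42)->qp_num);  /* 42 */
	return 0;
}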
|
D | qib_verbs_mcast.c |
  225  int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in qib_multicast_attach() argument
  227  struct qib_qp *qp = to_iqp(ibqp);  in qib_multicast_attach()
  228  struct qib_ibdev *dev = to_idev(ibqp->device);  in qib_multicast_attach()
  234  if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {  in qib_multicast_attach()
  254  ibp = to_iport(ibqp->device, qp->port_num);  in qib_multicast_attach()
  283  int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in qib_multicast_detach() argument
  285  struct qib_qp *qp = to_iqp(ibqp);  in qib_multicast_detach()
  286  struct qib_ibdev *dev = to_idev(ibqp->device);  in qib_multicast_detach()
  287  struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);  in qib_multicast_detach()
  294  if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)  in qib_multicast_detach()
|
D | qib_keys.c |
  255  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;  in qib_rkey_ok()
  266  struct qib_pd *pd = to_ipd(qp->ibqp.pd);  in qib_rkey_ok()
  289  if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))  in qib_rkey_ok()
  343  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;  in qib_fast_reg_mr()
  344  struct qib_pd *pd = to_ipd(qp->ibqp.pd);  in qib_fast_reg_mr()
  360  if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))  in qib_fast_reg_mr()
|
D | qib_uc.c |
  333  if (qp->ibqp.event_handler) {  in qib_uc_rcv()
  336  ev.device = qp->ibqp.device;  in qib_uc_rcv()
  337  ev.element.qp = &qp->ibqp;  in qib_uc_rcv()
  339  qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);  in qib_uc_rcv()
  407  wc.qp = &qp->ibqp;  in qib_uc_rcv()
  417  qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,  in qib_uc_rcv()
|
D | qib_verbs.c |
  368  } else if (qp->ibqp.qp_type == IB_QPT_UC) {  in qib_post_one_send()
  371  } else if (qp->ibqp.qp_type != IB_QPT_RC) {  in qib_post_one_send()
  377  if (qp->ibqp.pd != wr->wr.ud.ah->pd)  in qib_post_one_send()
  397  rkt = &to_idev(qp->ibqp.device)->lk_table;  in qib_post_one_send()
  398  pd = to_ipd(qp->ibqp.pd);  in qib_post_one_send()
  421  if (qp->ibqp.qp_type == IB_QPT_UC ||  in qib_post_one_send()
  422  qp->ibqp.qp_type == IB_QPT_RC) {  in qib_post_one_send()
  425  } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +  in qib_post_one_send()
  447  dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {  in qib_post_one_send()
  463  static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  in qib_post_send() argument
  [all …]
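Line 377 rejects a UD work request whose address handle was created on a different PD than the QP: objects referenced together in one verb must share a protection domain. A sketch of that cross-object check, assuming stand-in types:

#include <stdio.h>

struct ib_pd { int pdn; };
struct ib_ah { struct ib_pd *pd; };
struct ib_qp { struct ib_pd *pd; };

/* A UD send WR names an AH; both AH and QP must belong to the same PD. */
static int ud_wr_pd_ok(struct ib_qp *qp, struct ib_ah *ah)
{
	return qp->pd == ah->pd;
}

int main(void)
{
	struct ib_pd pd_a = { 1 }, pd_b = { 2 };
	struct ib_ah ah = { &pd_a };
	struct ib_qp qp = { &pd_b };
	printf("%d\n", ud_wr_pd_ok(&qp, &ah));  /* 0: post_send must fail */
	return 0;
}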
|
D | qib_verbs.h |
  421  struct ib_qp ibqp;  member
  833  static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)  in to_iqp() argument
  835  return container_of(ibqp, struct qib_qp, ibqp);  in to_iqp()
  900  int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
  902  int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
  914  int qib_destroy_qp(struct ib_qp *ibqp);
  918  int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  921  int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
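Every driver in this listing uses the same embed-and-wrap idiom: the core's struct ib_qp is embedded in a driver-private QP, the core is handed a pointer to the embedded member, and the wrapper is recovered with container_of (to_iqp, to_mqp, to_c2qp, and so on). A standalone sketch, with container_of reproduced locally so it compiles outside the kernel and a hypothetical driver_state field for illustration:

#include <stddef.h>
#include <stdio.h>

/* Same definition the kernel uses, reproduced so the sketch is self-contained. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_qp { unsigned qp_num; };

/* Driver-private QP wrapping the core structure, as in qib_verbs.h. */
struct qib_qp {
	int driver_state;     /* hypothetical private field */
	struct ib_qp ibqp;    /* embedded core QP; &qp->ibqp is handed out */
};

static struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

int main(void)
{
	struct qib_qp qp = { .driver_state = 7, .ibqp = { .qp_num = 3 } };
	struct ib_qp *handle = &qp.ibqp;               /* what the core sees */
	printf("%d\n", to_iqp(handle)->driver_state);  /* recovers wrapper: 7 */
	return 0;
}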
|
D | qib_rc.c |
  233  struct qib_ibdev *dev = to_idev(qp->ibqp.device);  in qib_make_rc_req()
  650  struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);  in qib_send_rc_ack()
  651  struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in qib_send_rc_ack()
  886  ibp = to_iport(qp->ibqp.device, qp->port_num);  in qib_restart_rc()
  912  ibp = to_iport(qp->ibqp.device, qp->port_num);  in rc_timeout()
  1025  wc.qp = &qp->ibqp;  in qib_rc_send_complete()
  1026  qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);  in qib_rc_send_complete()
  1081  wc.qp = &qp->ibqp;  in do_rc_completion()
  1082  qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);  in do_rc_completion()
  1153  ibp = to_iport(qp->ibqp.device, qp->port_num);  in do_rc_ack()
  [all …]
|
D | qib_driver.c | 370 switch (qp->ibqp.qp_type) { in qib_rcv_hdrerr()
|
D | qib_sdma.c | 669 if (qp->ibqp.qp_type == IB_QPT_RC) { in qib_sdma_verbs_send()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_ruc.c |
  89  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in ipath_insert_rnr_queue()
  149  wc.qp = &qp->ibqp;  in ipath_init_sge()
  151  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);  in ipath_init_sge()
  177  if (qp->ibqp.srq) {  in ipath_get_rwqe()
  178  srq = to_isrq(qp->ibqp.srq);  in ipath_get_rwqe()
  236  ev.device = qp->ibqp.device;  in ipath_get_rwqe()
  237  ev.element.srq = qp->ibqp.srq;  in ipath_get_rwqe()
  262  struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);  in ipath_ruc_loopback()
  318  if (sqp->ibqp.qp_type == IB_QPT_RC)  in ipath_ruc_loopback()
  439  wc.qp = &qp->ibqp;  in ipath_ruc_loopback()
  [all …]
|
D | ipath_qp.c |
  218  qp->ibqp.qp_num = ret;  in ipath_alloc_qpn()
  251  qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];  in ipath_free_qp()
  309  if (qp->ibqp.qp_num == qpn) {  in ipath_lookup_qpn()
  379  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in ipath_error_qp()
  400  wc.qp = &qp->ibqp;  in ipath_error_qp()
  406  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);  in ipath_error_qp()
  429  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);  in ipath_error_qp()
  434  } else if (qp->ibqp.event_handler)  in ipath_error_qp()
  450  int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in ipath_modify_qp() argument
  453  struct ipath_ibdev *dev = to_idev(ibqp->device);  in ipath_modify_qp()
  [all …]
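ipath_error_qp() drains outstanding receive work requests by synthesizing completions with a flush status and pushing them straight into the CQ (the ipath_cq_enter(..., &wc, 1) calls at 406 and 429). A loose sketch of that drain loop, assuming plain arrays in place of the real work queue and CQ ring:

#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR };
struct wc { unsigned long wr_id; enum wc_status status; };

/* Turn pending wr_ids into flush-status completions, as a QP does when it
 * transitions to the error state. Returns how many completions were made. */
static int flush_pending(const unsigned long *wr_ids, int n,
			 struct wc *cq, int cq_cap)
{
	int produced = 0;
	for (int i = 0; i < n && produced < cq_cap; i++) {
		cq[produced].wr_id = wr_ids[i];
		cq[produced].status = WC_WR_FLUSH_ERR;
		produced++;
	}
	return produced;
}

int main(void)
{
	unsigned long pending[] = { 7, 8, 9 };
	struct wc cq[4];
	int n = flush_pending(pending, 3, cq, 4);
	for (int i = 0; i < n; i++)
		printf("wr_id=%lu flushed\n", cq[i].wr_id);
	return 0;
}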
|
D | ipath_ud.c |
  52  struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);  in ipath_ud_loopback()
  79  if (unlikely(qp->ibqp.qp_num &&  in ipath_ud_loopback()
  107  if (qp->ibqp.srq) {  in ipath_ud_loopback()
  108  srq = to_isrq(qp->ibqp.srq);  in ipath_ud_loopback()
  169  ev.device = qp->ibqp.device;  in ipath_ud_loopback()
  170  ev.element.srq = qp->ibqp.srq;  in ipath_ud_loopback()
  215  wc.qp = &qp->ibqp;  in ipath_ud_loopback()
  216  wc.src_qp = sqp->ibqp.qp_num;  in ipath_ud_loopback()
  227  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,  in ipath_ud_loopback()
  243  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in ipath_make_ud_req()
  [all …]
|
D | ipath_keys.c |
  124  struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;  in ipath_lkey_ok()
  136  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);  in ipath_lkey_ok()
  151  qp->ibqp.pd != mr->pd)) {  in ipath_lkey_ok()
  202  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in ipath_rkey_ok()
  216  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);  in ipath_rkey_ok()
  234  qp->ibqp.pd != mr->pd)) {  in ipath_rkey_ok()
|
D | ipath_verbs_mcast.c |
  236  int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in ipath_multicast_attach() argument
  238  struct ipath_qp *qp = to_iqp(ibqp);  in ipath_multicast_attach()
  239  struct ipath_ibdev *dev = to_idev(ibqp->device);  in ipath_multicast_attach()
  285  int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in ipath_multicast_detach() argument
  287  struct ipath_qp *qp = to_iqp(ibqp);  in ipath_multicast_detach()
  288  struct ipath_ibdev *dev = to_idev(ibqp->device);  in ipath_multicast_detach()
|
D | ipath_verbs.h |
  358  struct ib_qp ibqp;  member
  680  static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)  in to_iqp() argument
  682  return container_of(ibqp, struct ipath_qp, ibqp);  in to_iqp()
  726  int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
  728  int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
  740  int ipath_destroy_qp(struct ib_qp *ibqp);
  744  int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  747  int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
D | ipath_verbs.c |
  345  struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;  in ipath_post_one_send()
  349  if (qp->ibqp.qp_type != IB_QPT_SMI &&  in ipath_post_one_send()
  368  if (qp->ibqp.qp_type == IB_QPT_UC) {  in ipath_post_one_send()
  371  } else if (qp->ibqp.qp_type == IB_QPT_UD) {  in ipath_post_one_send()
  377  if (qp->ibqp.pd != wr->wr.ud.ah->pd)  in ipath_post_one_send()
  418  if (qp->ibqp.qp_type == IB_QPT_UC ||  in ipath_post_one_send()
  419  qp->ibqp.qp_type == IB_QPT_RC) {  in ipath_post_one_send()
  422  } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)  in ipath_post_one_send()
  445  static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  in ipath_post_send() argument
  448  struct ipath_qp *qp = to_iqp(ibqp);  in ipath_post_send()
  [all …]
|
D | ipath_rc.c |
  68  dev = to_idev(qp->ibqp.device);  in ipath_init_restart()
  215  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in ipath_make_rc_req()
  614  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in send_rc_ack()
  821  dev = to_idev(qp->ibqp.device);  in ipath_restart_rc()
  860  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);  in do_rc_ack()
  956  wc.qp = &qp->ibqp;  in do_rc_ack()
  960  ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);  in do_rc_ack()
  1537  ev.device = qp->ibqp.device;  in ipath_rc_error()
  1538  ev.element.qp = &qp->ibqp;  in ipath_rc_error()
  1540  qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);  in ipath_rc_error()
  [all …]
|
D | ipath_uc.c |
  213  ipath_make_ruc_header(to_idev(qp->ibqp.device),  in ipath_make_uc_req()
  415  wc.qp = &qp->ibqp;  in ipath_uc_rcv()
  420  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,  in ipath_uc_rcv()
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
  260  event.element.qp = &qp->ibqp;  in mthca_qp_event()
  261  if (qp->ibqp.event_handler)  in mthca_qp_event()
  262  qp->ibqp.event_handler(&event, qp->ibqp.qp_context);  in mthca_qp_event()
  424  int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,  in mthca_query_qp() argument
  427  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_query_qp()
  428  struct mthca_qp *qp = to_mqp(ibqp);  in mthca_query_qp()
  543  static int __mthca_modify_qp(struct ib_qp *ibqp,  in __mthca_modify_qp() argument
  547  struct mthca_dev *dev = to_mdev(ibqp->device);  in __mthca_modify_qp()
  548  struct mthca_qp *qp = to_mqp(ibqp);  in __mthca_modify_qp()
  609  if (qp->ibqp.uobject)  in __mthca_modify_qp()
  [all …]
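mthca_qp_event() at lines 260-262 shows the dispatch pattern nearly every driver in this listing repeats: build an ib_event, point element.qp at the embedded ibqp, and invoke the consumer's event_handler with the opaque qp_context. A minimal sketch, assuming a cut-down event struct (the kernel's ib_event carries a device pointer and a union of elements):

#include <stdio.h>

struct ib_qp;
struct ib_event { int type; struct ib_qp *qp; };

struct ib_qp {
	void (*event_handler)(struct ib_event *ev, void *ctx);
	void *qp_context;     /* opaque pointer owned by the consumer */
};

/* Deliver an async event to whoever created the QP, if they registered. */
static void dispatch_qp_event(struct ib_qp *ibqp, int type)
{
	struct ib_event ev = { .type = type, .qp = ibqp };
	if (ibqp->event_handler)
		ibqp->event_handler(&ev, ibqp->qp_context);
}

static void on_event(struct ib_event *ev, void *ctx)
{
	printf("event %d on %s\n", ev->type, (const char *)ctx);
}

int main(void)
{
	struct ib_qp qp = { .event_handler = on_event, .qp_context = "qp-7" };
	dispatch_qp_event(&qp, 3);
	return 0;
}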
|
D | mthca_mcg.c |
  120  int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mthca_multicast_attach() argument
  122  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_attach()
  165  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {  in mthca_multicast_attach()
  167  ibqp->qp_num);  in mthca_multicast_attach()
  171  mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31));  in mthca_multicast_attach()
  214  int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mthca_multicast_detach() argument
  216  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_detach()
  242  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31)))  in mthca_multicast_detach()
  249  mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num);  in mthca_multicast_detach()
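mthca marks an occupied slot in a multicast group member (MGM) list by OR-ing bit 31 above the 24-bit QPN before byte-swapping the word into the hardware table (lines 165-242). The sketch below keeps host byte order for simplicity; the valid-bit encoding is the point, and the slot count is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define MGM_SLOT_VALID (1u << 31)   /* bit 31 flags an occupied slot */

/* Find qpn in the member list, or -1; empty slots are all-zero words. */
static int mgm_find(const uint32_t *slot, int nslots, uint32_t qpn)
{
	for (int i = 0; i < nslots; i++)
		if (slot[i] == (qpn | MGM_SLOT_VALID))
			return i;
	return -1;
}

int main(void)
{
	uint32_t mgm[8] = { 0 };
	mgm[2] = 0x123456 | MGM_SLOT_VALID;         /* attach QP 0x123456 */
	printf("%d\n", mgm_find(mgm, 8, 0x123456)); /* 2 */
	printf("%d\n", mgm_find(mgm, 8, 0x000042)); /* -1: not a member */
	return 0;
}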
|
D | mthca_provider.h |
  262  struct ib_qp ibqp;  member
  334  static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)  in to_mqp() argument
  336  return container_of(ibqp, struct mthca_qp, ibqp);  in to_mqp()
|
D | mthca_dev.h |
  529  int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
  531  int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
  533  int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  535  int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
  537  int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  539  int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
  573  int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
  574  int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
|
D | mthca_cq.c |
  536  entry->qp = &(*cur_qp)->ibqp;  in mthca_poll_one()
  544  } else if ((*cur_qp)->ibqp.srq) {  in mthca_poll_one()
  545  struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);  in mthca_poll_one()
|
D | mthca_provider.c |
  584  qp->ibqp.qp_num = qp->qpn;  in mthca_create_qp()
  598  qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;  in mthca_create_qp()
  604  qp->ibqp.qp_num, init_attr->port_num,  in mthca_create_qp()
  624  return &qp->ibqp;  in mthca_create_qp()
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | qp.c |
  247  if (qp->ibqp.qp_type == IB_QPT_UD) {  in post_nop_wqe()
  251  av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);  in post_nop_wqe()
  288  struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;  in mlx4_ib_qp_event() local
  293  if (ibqp->event_handler) {  in mlx4_ib_qp_event()
  294  event.device = ibqp->device;  in mlx4_ib_qp_event()
  295  event.element.qp = ibqp;  in mlx4_ib_qp_event()
  327  ibqp->event_handler(&event, ibqp->qp_context);  in mlx4_ib_qp_event()
  956  if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)  in get_pd()
  957  return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);  in get_pd()
  959  return to_mpd(qp->ibqp.pd);  in get_pd()
  [all …]
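get_pd() at lines 956-959 is a small but telling detail: an XRC target QP has no protection domain of its own, so the driver borrows the one stashed in the XRC domain. A sketch with stand-in types (the real accessors go through to_mpd/to_mxrcd wrappers):

#include <stdio.h>

struct ib_pd { int pdn; };
struct ib_xrcd { struct ib_pd *pd; };          /* domain-wide fallback PD */
enum qp_type { QPT_RC, QPT_XRC_TGT };
struct ib_qp { enum qp_type type; struct ib_pd *pd; struct ib_xrcd *xrcd; };

/* XRC target QPs are created without a PD; take the xrcd's instead. */
static struct ib_pd *get_pd(struct ib_qp *qp)
{
	if (qp->type == QPT_XRC_TGT)
		return qp->xrcd->pd;
	return qp->pd;
}

int main(void)
{
	struct ib_pd pd = { 5 };
	struct ib_xrcd xrcd = { &pd };
	struct ib_qp qp = { QPT_XRC_TGT, NULL, &xrcd };
	printf("pdn=%d\n", get_pd(&qp)->pdn);   /* 5, taken from the xrcd */
	return 0;
}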
|
D | mlx4_ib.h |
  272  struct ib_qp ibqp;  member
  613  static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)  in to_mqp() argument
  615  return container_of(ibqp, struct mlx4_ib_qp, ibqp);  in to_mqp()
  699  int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  701  int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
  703  int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  705  int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
D | main.c |
  807  static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)  in add_gid_entry() argument
  809  struct mlx4_ib_qp *mqp = to_mqp(ibqp);  in add_gid_entry()
  810  struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);  in add_gid_entry()
  1266  static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mlx4_ib_mcg_attach() argument
  1269  struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);  in mlx4_ib_mcg_attach()
  1271  struct mlx4_ib_qp *mqp = to_mqp(ibqp);  in mlx4_ib_mcg_attach()
  1303  err = add_gid_entry(ibqp, gid);  in mlx4_ib_mcg_attach()
  1344  static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mlx4_ib_mcg_detach() argument
  1347  struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);  in mlx4_ib_mcg_detach()
  1349  struct mlx4_ib_qp *mqp = to_mqp(ibqp);  in mlx4_ib_mcg_detach()
  [all …]
|
D | cq.c |
  575  ib_dma_sync_single_for_cpu(qp->ibqp.device,  in use_tunnel_data()
  617  wc->qp = &qp->ibqp;  in mlx4_ib_qp_sw_comp()
  723  wc->qp = &(*cur_qp)->ibqp;  in mlx4_ib_poll_one()
  747  } else if ((*cur_qp)->ibqp.srq) {  in mlx4_ib_poll_one()
  748  srq = to_msrq((*cur_qp)->ibqp.srq);  in mlx4_ib_poll_one()
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
D | usnic_ib_qp_grp.h |
  34  struct ib_qp ibqp;  member
  113  struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)  in to_uqp_grp() argument
  115  return container_of(ibqp, struct usnic_ib_qp_grp, ibqp);  in to_uqp_grp()
|
D | usnic_ib_verbs.h |
  45  int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  63  int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  65  int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
D | usnic_ib_qp_grp.c |
  70  qp_grp->ibqp.qp_num,  in usnic_ib_qp_grp_dump_rows()
  478  ib_event.element.qp = &qp_grp->ibqp;  in usnic_ib_qp_grp_modify()
  483  qp_grp->ibqp.event_handler(&ib_event,  in usnic_ib_qp_grp_modify()
  484  qp_grp->ibqp.qp_context);  in usnic_ib_qp_grp_modify()
  488  qp_grp->ibqp.event_handler(&ib_event,  in usnic_ib_qp_grp_modify()
  489  qp_grp->ibqp.qp_context);  in usnic_ib_qp_grp_modify()
  495  qp_grp->ibqp.event_handler(&ib_event,  in usnic_ib_qp_grp_modify()
  496  qp_grp->ibqp.qp_context);  in usnic_ib_qp_grp_modify()
  718  qp_grp->ibqp.qp_num = qp_grp->grp_id;  in usnic_ib_qp_grp_create()
|
D | usnic_ib_verbs.c |
  371  switch (qp_grp->ibqp.qp_type) {  in usnic_ib_query_qp()
  376  usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);  in usnic_ib_query_qp()
  515  return &qp_grp->ibqp;  in usnic_ib_create_qp()
  546  int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in usnic_ib_modify_qp() argument
  553  qp_grp = to_uqp_grp(ibqp);  in usnic_ib_modify_qp()
  732  int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  in usnic_ib_post_send() argument
  739  int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,  in usnic_ib_post_recv() argument
|
D | usnic_ib_sysfs.c | 243 qp_grp->ibqp.qp_num, in summary_show()
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
D | c2_provider.h |
  108  struct ib_qp ibqp;  member
  158  static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)  in to_c2qp() argument
  160  return container_of(ibqp, struct c2_qp, ibqp);  in to_c2qp()
|
D | c2_cm.c |
  44  struct ib_qp *ibqp;  in c2_llp_connect() local
  54  ibqp = c2_get_qp(cm_id->device, iw_param->qpn);  in c2_llp_connect()
  55  if (!ibqp)  in c2_llp_connect()
  57  qp = to_c2qp(ibqp);  in c2_llp_connect()
  295  struct ib_qp *ibqp;  in c2_llp_accept() local
  301  ibqp = c2_get_qp(cm_id->device, iw_param->qpn);  in c2_llp_accept()
  302  if (!ibqp)  in c2_llp_accept()
  304  qp = to_c2qp(ibqp);  in c2_llp_accept()
|
D | c2_provider.c |
  202  static void c2_add_ref(struct ib_qp *ibqp)  in c2_add_ref() argument
  205  BUG_ON(!ibqp);  in c2_add_ref()
  206  qp = to_c2qp(ibqp);  in c2_add_ref()
  210  static void c2_rem_ref(struct ib_qp *ibqp)  in c2_rem_ref() argument
  213  BUG_ON(!ibqp);  in c2_rem_ref()
  214  qp = to_c2qp(ibqp);  in c2_rem_ref()
  229  return (qp?&qp->ibqp:NULL);  in c2_get_qp()
  275  return &qp->ibqp;  in c2_create_qp()
  558  static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in c2_modify_qp() argument
  564  c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,  in c2_modify_qp()
  [all …]
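c2_add_ref()/c2_rem_ref() wrap a reference count on the private QP so the CM and async-event paths can pin a QP that the verbs layer might concurrently be destroying (the same add_ref/rem_ref pairing appears again in the cxgb3, cxgb4, and nes entries below). A user-space sketch with C11 atomics, assuming the kernel's atomic_t and its final-put wakeup are out of scope:

#include <stdatomic.h>
#include <stdio.h>

struct qp {
	atomic_int refcount;
	/* ... hardware state ... */
};

static void qp_add_ref(struct qp *qp)
{
	atomic_fetch_add(&qp->refcount, 1);
}

/* Returns 1 when the caller dropped the last reference and may free. */
static int qp_rem_ref(struct qp *qp)
{
	return atomic_fetch_sub(&qp->refcount, 1) == 1;
}

int main(void)
{
	struct qp qp;
	atomic_init(&qp.refcount, 1);     /* creation holds the first ref */
	qp_add_ref(&qp);                  /* e.g. an async event in flight */
	printf("%d\n", qp_rem_ref(&qp));  /* 0: still referenced */
	printf("%d\n", qp_rem_ref(&qp));  /* 1: safe to tear down */
	return 0;
}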
|
D | c2_ae.c |
  235  ib_event.element.qp = &qp->ibqp;  in c2_ae_event()
  238  if (qp->ibqp.event_handler)  in c2_ae_event()
  239  qp->ibqp.event_handler(&ib_event,  in c2_ae_event()
  240  qp->ibqp.  in c2_ae_event()
|
D | c2_qp.c |
  432  qp->ibqp.qp_num = qp->qpn;  in c2_alloc_qp()
  433  qp->ibqp.qp_type = IB_QPT_RC;  in c2_alloc_qp()
  605  send_cq = to_c2cq(qp->ibqp.send_cq);  in c2_free_qp()
  606  recv_cq = to_c2cq(qp->ibqp.recv_cq);  in c2_free_qp()
  790  int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,  in c2_post_send() argument
  793  struct c2_dev *c2dev = to_c2dev(ibqp->device);  in c2_post_send()
  794  struct c2_qp *qp = to_c2qp(ibqp);  in c2_post_send()
  944  int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,  in c2_post_receive() argument
  947  struct c2_dev *c2dev = to_c2dev(ibqp->device);  in c2_post_receive()
  948  struct c2_qp *qp = to_c2qp(ibqp);  in c2_post_receive()
|
D | c2.h |
  496  extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
  498  extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
|
D | c2_cq.c | 158 entry->qp = &qp->ibqp; in c2_poll_one()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_ev.c |
  93  event.element.qp = &qhp->ibqp;  in post_qp_event()
  95  if (qhp->ibqp.event_handler)  in post_qp_event()
  96  (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);  in post_qp_event()
  129  iwch_qp_add_ref(&qhp->ibqp);  in iwch_ev_dispatch()
  229  iwch_qp_rem_ref(&qhp->ibqp);  in iwch_ev_dispatch()
|
D | iwch_provider.h |
  160  struct ib_qp ibqp;  member
  177  static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)  in to_iwch_qp() argument
  179  return container_of(ibqp, struct iwch_qp, ibqp);  in to_iwch_qp()
  327  int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  329  int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
D | iwch_provider.c |
  75  static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in iwch_multicast_attach() argument
  80  static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in iwch_multicast_detach() argument
  1000  iwch_destroy_qp(&qhp->ibqp);  in iwch_create_qp()
  1007  iwch_destroy_qp(&qhp->ibqp);  in iwch_create_qp()
  1024  iwch_destroy_qp(&qhp->ibqp);  in iwch_create_qp()
  1036  qhp->ibqp.qp_num = qhp->wq.qpid;  in iwch_create_qp()
  1043  return &qhp->ibqp;  in iwch_create_qp()
  1046  static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in iwch_ib_modify_qp() argument
  1054  PDBG("%s ib_qp %p\n", __func__, ibqp);  in iwch_ib_modify_qp()
  1065  qhp = to_iwch_qp(ibqp);  in iwch_ib_modify_qp()
|
D | iwch_qp.c |
  350  int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  in iwch_post_send() argument
  365  qhp = to_iwch_qp(ibqp);  in iwch_post_send()
  465  int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,  in iwch_post_receive() argument
  475  qhp = to_iwch_qp(ibqp);  in iwch_post_receive()
  860  if (qhp->ibqp.uobject) {  in flush_qp()
  917  if (!qhp->ibqp.uobject)  in rdma_init()
  1056  if (qhp->ibqp.uobject)  in iwch_modify_qp()
|
D | iwch_cq.c | 82 wc->qp = &qhp->ibqp; in iwch_poll_cq_one()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | odp.c |
  164  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_page_fault_resume()
  190  struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);  in pagefault_single_data_segment()
  219  if (mr->ibmr.pd != qp->ibqp.pd) {  in pagefault_single_data_segment()
  330  if (receive_queue && qp->ibqp.srq)  in pagefault_data_segments()
  394  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_mr_initiator_pfault_handler()
  441  switch (qp->ibqp.qp_type) {  in mlx5_ib_mr_initiator_pfault_handler()
  484  qp->ibqp.qp_type, opcode);  in mlx5_ib_mr_initiator_pfault_handler()
  499  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);  in mlx5_ib_mr_responder_pfault_handler()
  503  if (qp->ibqp.srq) {  in mlx5_ib_mr_responder_pfault_handler()
  518  switch (qp->ibqp.qp_type) {  in mlx5_ib_mr_responder_pfault_handler()
  [all …]
|
D | qp.c |
  124  struct ib_device *ibdev = qp->ibqp.device;  in mlx5_ib_read_user_wqe()
  136  qp->ibqp.qp_type);  in mlx5_ib_read_user_wqe()
  177  struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;  in mlx5_ib_qp_event() local
  183  if (ibqp->event_handler) {  in mlx5_ib_qp_event()
  184  event.device = ibqp->device;  in mlx5_ib_qp_event()
  185  event.element.qp = ibqp;  in mlx5_ib_qp_event()
  216  ibqp->event_handler(&event, ibqp->qp_context);  in mlx5_ib_qp_event()
  1116  return to_mpd(qp->ibqp.pd);  in get_pd()
  1122  switch (qp->ibqp.qp_type) {  in get_cqs()
  1129  *send_cq = to_mcq(qp->ibqp.send_cq);  in get_cqs()
  [all …]
|
D | mlx5_ib.h |
  176  struct ib_qp ibqp;  member
  491  static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)  in to_mqp() argument
  493  return container_of(ibqp, struct mlx5_ib_qp, ibqp);  in to_mqp()
  547  int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  549  int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
  552  int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  554  int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
D | cq.c |
  173  struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);  in handle_responder()
  179  if (qp->ibqp.srq || qp->ibqp.xrcd) {  in handle_responder()
  182  if (qp->ibqp.xrcd) {  in handle_responder()
  187  srq = to_msrq(qp->ibqp.srq);  in handle_responder()
  448  if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {  in mlx5_poll_one()
  463  wc->qp = &(*cur_qp)->ibqp;  in mlx5_poll_one()
  502  if ((*cur_qp)->ibqp.srq) {  in mlx5_poll_one()
  503  srq = to_msrq((*cur_qp)->ibqp.srq);  in mlx5_poll_one()
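Line 448 is the classic poll-loop optimization: consecutive CQEs usually belong to the same QP, so mlx5_poll_one() re-resolves *cur_qp only when the CQE's QPN differs from the cached one (mthca_poll_one and mlx4_ib_poll_one above do the same). A sketch with a stand-in table lookup:

#include <stdio.h>

struct qp { unsigned qpn; };
struct cqe { unsigned qpn; };

static struct qp qp_table[4] = { {0}, {1}, {2}, {3} };

static struct qp *lookup_qp(unsigned qpn) { return &qp_table[qpn & 3]; }

/* Re-resolve the QP only when this CQE names a different QPN than the
 * previous one; *cur_qp acts as a one-entry cache across the poll loop. */
static struct qp *poll_one(struct qp **cur_qp, const struct cqe *cqe)
{
	if (!*cur_qp || (*cur_qp)->qpn != cqe->qpn)
		*cur_qp = lookup_qp(cqe->qpn);
	return *cur_qp;
}

int main(void)
{
	struct qp *cur = NULL;
	struct cqe batch[] = { {2}, {2}, {3} };   /* middle CQE skips lookup */
	for (int i = 0; i < 3; i++)
		printf("cqe %d -> qpn %u\n", i, poll_one(&cur, &batch[i])->qpn);
	return 0;
}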
|
D | main.c |
  706  static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mlx5_ib_mcg_attach() argument
  708  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in mlx5_ib_mcg_attach()
  711  err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);  in mlx5_ib_mcg_attach()
  714  ibqp->qp_num, gid->raw);  in mlx5_ib_mcg_attach()
  719  static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mlx5_ib_mcg_detach() argument
  721  struct mlx5_ib_dev *dev = to_mdev(ibqp->device);  in mlx5_ib_mcg_detach()
  724  err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);  in mlx5_ib_mcg_detach()
  727  ibqp->qp_num, gid->raw);  in mlx5_ib_mcg_detach()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | ev.c |
  109  event.element.qp = &qhp->ibqp;  in post_qp_event()
  110  if (qhp->ibqp.event_handler)  in post_qp_event()
  111  (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);  in post_qp_event()
  153  c4iw_qp_add_ref(&qhp->ibqp);  in c4iw_ev_dispatch()
  218  c4iw_qp_rem_ref(&qhp->ibqp);  in c4iw_ev_dispatch()
|
D | iw_cxgb4.h |
  479  struct ib_qp ibqp;  member
  493  static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)  in to_c4iw_qp() argument
  495  return container_of(ibqp, struct c4iw_qp, ibqp);  in to_c4iw_qp()
  958  int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
  960  int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
  1005  int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  1007  int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
D | qp.c |
  724  int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  in c4iw_post_send() argument
  738  qhp = to_c4iw_qp(ibqp);  in c4iw_post_send()
  853  int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,  in c4iw_post_receive() argument
  864  qhp = to_c4iw_qp(ibqp);  in c4iw_post_receive()
  1168  rchp = to_c4iw_cq(qhp->ibqp.recv_cq);  in flush_qp()
  1169  schp = to_c4iw_cq(qhp->ibqp.send_cq);  in flush_qp()
  1172  if (qhp->ibqp.uobject) {  in flush_qp()
  1299  if (!qhp->ibqp.uobject)  in rdma_init()
  1784  qhp->ibqp.qp_num = qhp->wq.sq.qid;  in c4iw_create_qp()
  1792  return &qhp->ibqp;  in c4iw_create_qp()
  [all …]
|
D | provider.c |
  72  static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in c4iw_multicast_attach() argument
  77  static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in c4iw_multicast_detach() argument
|
D | cq.c |
  242  struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);  in c4iw_flush_sq()
  711  wc->qp = &qhp->ibqp;  in c4iw_poll_cq_one()
|
D | device.c |
  1354  c4iw_qp_add_ref(&qp->ibqp);  in add_and_ref_qp()
  1371  c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);  in deref_qps()
|
D | cm.c |
  153  c4iw_qp_rem_ref(&ep->com.qp->ibqp);  in deref_qp()
  160  c4iw_qp_add_ref(&ep->com.qp->ibqp);  in ref_qp()
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
  1392  qp->ibqp.qp_num = qp->id;  in ocrdma_create_qp()
  1394  return &qp->ibqp;  in ocrdma_create_qp()
  1410  int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in _ocrdma_modify_qp() argument
  1418  qp = get_ocrdma_qp(ibqp);  in _ocrdma_modify_qp()
  1419  dev = get_ocrdma_dev(ibqp->device);  in _ocrdma_modify_qp()
  1432  int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in ocrdma_modify_qp() argument
  1441  qp = get_ocrdma_qp(ibqp);  in ocrdma_modify_qp()
  1442  dev = get_ocrdma_dev(ibqp->device);  in ocrdma_modify_qp()
  1455  if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,  in ocrdma_modify_qp()
  1459  __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,  in ocrdma_modify_qp()
  [all …]
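ocrdma_modify_qp() defers legality checking to the core's ib_modify_qp_is_ok() before touching hardware (line 1455). The sketch below validates only a toy subset of the RESET→INIT→RTR→RTS ladder; the real table also keys on qp_type and the attr_mask bits, so treat this as shape, not spec:

#include <stdio.h>

enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR };

/* Toy subset of the IB QP state machine: forward steps, plus "anything
 * may move to ERROR or RESET". ib_modify_qp_is_ok() encodes the full table. */
static int modify_qp_is_ok(enum qps cur, enum qps next)
{
	if (next == QPS_ERR || next == QPS_RESET)
		return 1;
	switch (cur) {
	case QPS_RESET: return next == QPS_INIT;
	case QPS_INIT:  return next == QPS_INIT || next == QPS_RTR;
	case QPS_RTR:   return next == QPS_RTS;
	case QPS_RTS:   return next == QPS_RTS;
	default:        return 0;
	}
}

int main(void)
{
	printf("%d\n", modify_qp_is_ok(QPS_RESET, QPS_INIT)); /* 1 */
	printf("%d\n", modify_qp_is_ok(QPS_RESET, QPS_RTS));  /* 0: skips RTR */
	return 0;
}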
|
D | ocrdma.h |
  372  struct ib_qp ibqp;  member
  459  static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)  in get_ocrdma_qp() argument
  461  return container_of(ibqp, struct ocrdma_qp, ibqp);  in get_ocrdma_qp()
|
D | ocrdma_main.c |
  577  if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {  in ocrdma_close()
  579  _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);  in ocrdma_close()
  582  err_event.element.qp = &qp->ibqp;  in ocrdma_close()
|
D | ocrdma_hw.c |
  690  ib_evt.element.qp = &qp->ibqp;  in ocrdma_dispatch_ibevent()
  695  ib_evt.element.qp = &qp->ibqp;  in ocrdma_dispatch_ibevent()
  699  ib_evt.element.qp = &qp->ibqp;  in ocrdma_dispatch_ibevent()
  703  ib_evt.element.qp = &qp->ibqp;  in ocrdma_dispatch_ibevent()
  725  ib_evt.element.qp = &qp->ibqp;  in ocrdma_dispatch_ibevent()
  741  if (qp->ibqp.event_handler)  in ocrdma_dispatch_ibevent()
  742  qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);  in ocrdma_dispatch_ibevent()
  2049  struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);  in ocrdma_flush_qp()
  2438  struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);  in ocrdma_set_av_params()
  2500  struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);  in ocrdma_set_qp_params()
|
/linux-4.1.27/drivers/infiniband/hw/nes/ |
D | nes_verbs.c |
  211  static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,  in nes_bind_mw() argument
  215  struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);  in nes_bind_mw()
  218  struct nes_qp *nesqp = to_nesqp(ibqp);  in nes_bind_mw()
  1265  nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;  in nes_create_qp()
  1405  nes_add_ref(&nesqp->ibqp);  in nes_create_qp()
  1422  return &nesqp->ibqp;  in nes_create_qp()
  1462  static int nes_destroy_qp(struct ib_qp *ibqp)  in nes_destroy_qp() argument
  1464  struct nes_qp *nesqp = to_nesqp(ibqp);  in nes_destroy_qp()
  1478  nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);  in nes_destroy_qp()
  1502  if ((ibqp->uobject)&&(ibqp->uobject->context)) {  in nes_destroy_qp()
  [all …]
|
D | nes.c |
  266  void nes_add_ref(struct ib_qp *ibqp)  in nes_add_ref() argument
  270  nesqp = to_nesqp(ibqp);  in nes_add_ref()
  272  ibqp->qp_num, atomic_read(&nesqp->refcount));  in nes_add_ref()
  309  void nes_rem_ref(struct ib_qp *ibqp)  in nes_rem_ref() argument
  313  struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);  in nes_rem_ref()
  319  nesqp = to_nesqp(ibqp);  in nes_rem_ref()
  323  __func__, ibqp->qp_num, nesqp->last_aeq);  in nes_rem_ref()
  371  return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;  in nes_get_qp()
|
D | nes_cm.c |
  1735  nes_rem_ref(&nesqp->ibqp);  in rem_ref_cm_node()
  3001  nes_add_ref(&nesqp->ibqp);  in nes_cm_disconn()
  3021  nes_rem_ref(&nesqp->ibqp);  in nes_disconnect_worker()
  3060  nesvnic = to_nesvnic(nesqp->ibqp.device);  in nes_cm_disconn_true()
  3105  ibevent.device = nesqp->ibqp.device;  in nes_cm_disconn_true()
  3107  ibevent.element.qp = &nesqp->ibqp;  in nes_cm_disconn_true()
  3108  if (nesqp->ibqp.event_handler)  in nes_cm_disconn_true()
  3109  nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);  in nes_cm_disconn_true()
  3172  nesvnic = to_nesvnic(nesqp->ibqp.device);  in nes_disconnect()
  3214  struct ib_qp *ibqp;  in nes_accept() local
  [all …]
|
D | nes.h |
  510  static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)  in to_nesqp() argument
  512  return container_of(ibqp, struct nes_qp, ibqp);  in to_nesqp()
|
D | nes_verbs.h | 135 struct ib_qp ibqp; member
|
D | nes_hw.c | 3449 struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device); in nes_terminate_done()
|