
Searched refs:ibcq (Results 1 – 36 of 36) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_cq.c
63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter()
64 head = cq->ibcq.cqe; in ipath_cq_enter()
70 if (cq->ibcq.event_handler) { in ipath_cq_enter()
73 ev.device = cq->ibcq.device; in ipath_cq_enter()
74 ev.element.cq = &cq->ibcq; in ipath_cq_enter()
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in ipath_cq_enter()
115 to_idev(cq->ibcq.device)->n_wqe_errs++; in ipath_cq_enter()
129 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in ipath_poll_cq() argument
131 struct ipath_cq *cq = to_icq(ibcq); in ipath_poll_cq()
147 if (tail > (u32) cq->ibcq.cqe) in ipath_poll_cq()
[all …]
ipath_verbs.h
215 struct ib_cq ibcq; member
670 static inline struct ipath_cq *to_icq(struct ib_cq *ibcq) in to_icq() argument
672 return container_of(ibcq, struct ipath_cq, ibcq); in to_icq()
808 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
814 int ipath_destroy_cq(struct ib_cq *ibcq);
816 int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
818 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
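Every driver in this list wraps the core verbs handle the same way: a driver-private CQ struct embeds struct ib_cq as its ibcq member, and a container_of() helper (to_icq() here) recovers the wrapper from the pointer the core hands back. A minimal userspace sketch of the idiom, with hypothetical demo_cq/to_dcq names and a simplified container_of rather than the kernel's exact definition:

#include <stddef.h>

/* Simplified container_of: recover the enclosing struct from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the core verbs handle; the real struct ib_cq lives in
 * include/rdma/ib_verbs.h and has more fields. */
struct ib_cq {
	int cqe;
	void *cq_context;
};

/* Driver-private CQ, mirroring struct ipath_cq above: the embedded
 * member name must match the one passed to container_of(). */
struct demo_cq {
	struct ib_cq ibcq;
	unsigned int head, tail;
};

static inline struct demo_cq *to_dcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct demo_cq, ibcq);
}

Because the core only ever passes around the embedded ib_cq pointer, the conversion is a constant-offset subtraction with no lookup table or extra allocation.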
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_cq.c
66 if (head >= (unsigned) cq->ibcq.cqe) { in qib_cq_enter()
67 head = cq->ibcq.cqe; in qib_cq_enter()
73 if (cq->ibcq.event_handler) { in qib_cq_enter()
76 ev.device = cq->ibcq.device; in qib_cq_enter()
77 ev.element.cq = &cq->ibcq; in qib_cq_enter()
79 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in qib_cq_enter()
136 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in qib_poll_cq() argument
138 struct qib_cq *cq = to_icq(ibcq); in qib_poll_cq()
154 if (tail > (u32) cq->ibcq.cqe) in qib_poll_cq()
155 tail = (u32) cq->ibcq.cqe; in qib_poll_cq()
[all …]
qib_verbs.h
270 struct ib_cq ibcq; member
823 static inline struct qib_cq *to_icq(struct ib_cq *ibcq) in to_icq() argument
825 return container_of(ibcq, struct qib_cq, ibcq); in to_icq()
1010 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
1016 int qib_destroy_cq(struct ib_cq *ibcq);
1018 int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
1020 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
/linux-4.1.27/drivers/infiniband/hw/mlx4/
cq.c
44 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp() local
45 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx4_ib_cq_comp()
51 struct ib_cq *ibcq; in mlx4_ib_cq_event() local
59 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
60 if (ibcq->event_handler) { in mlx4_ib_cq_event()
61 event.device = ibcq->device; in mlx4_ib_cq_event()
63 event.element.cq = ibcq; in mlx4_ib_cq_event()
64 ibcq->event_handler(&event, ibcq->cq_context); in mlx4_ib_cq_event()
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
[all …]
mlx4_ib.h
104 struct ib_cq ibcq; member
578 static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
580 return container_of(ibcq, struct mlx4_ib_cq, ibcq); in to_mcq()
670 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
675 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
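mlx4 dispatches hardware interrupts to the consumer through the function pointers stored on the embedded ib_cq: comp_handler is invoked unconditionally on a completion, while the optional event_handler is NULL-checked before an async event is delivered. A runnable sketch of that dispatch shape, with hypothetical demo_* names standing in for the real verbs types:

#include <stdio.h>

struct demo_ib_cq;

typedef void (*demo_comp_handler)(struct demo_ib_cq *cq, void *cq_context);

struct demo_ib_cq {
	demo_comp_handler comp_handler;                     /* set at CQ creation */
	void (*event_handler)(int event, void *cq_context); /* may be NULL */
	void *cq_context;
};

/* Mirrors mlx4_ib_cq_comp(): forward a completion to the consumer
 * together with its registered context cookie. */
static void demo_cq_comp(struct demo_ib_cq *ibcq)
{
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

/* Mirrors mlx4_ib_cq_event(): async events are optional to handle. */
static void demo_cq_event(struct demo_ib_cq *ibcq, int event)
{
	if (ibcq->event_handler)
		ibcq->event_handler(event, ibcq->cq_context);
}

static void my_comp(struct demo_ib_cq *cq, void *ctx)
{
	printf("completion on cq %p (ctx %p)\n", (void *)cq, ctx);
}

int main(void)
{
	struct demo_ib_cq cq = { .comp_handler = my_comp };

	demo_cq_comp(&cq);
	demo_cq_event(&cq, 1); /* silently dropped: no event_handler installed */
	return 0;
}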
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_cq.c
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
235 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
259 event.element.cq = &cq->ibcq; in mthca_cq_event()
260 if (cq->ibcq.event_handler) in mthca_cq_event()
261 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); in mthca_cq_event()
295 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); in mthca_cq_clean()
297 if (prod_index == cq->cons_index + cq->ibcq.cqe) in mthca_cq_clean()
309 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
315 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), in mthca_cq_clean()
321 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); in mthca_cq_clean()
[all …]
mthca_provider.h
202 struct ib_cq ibcq; member
324 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
326 return container_of(ibcq, struct mthca_cq, ibcq); in to_mcq()
mthca_provider.c
704 return &cq->ibcq; in mthca_create_cq()
767 static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) in mthca_resize_cq() argument
769 struct mthca_dev *dev = to_mdev(ibcq->device); in mthca_resize_cq()
770 struct mthca_cq *cq = to_mcq(ibcq); in mthca_resize_cq()
781 if (entries == ibcq->cqe + 1) { in mthca_resize_cq()
821 tcqe = cq->ibcq.cqe; in mthca_resize_cq()
823 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq()
835 ibcq->cqe = entries - 1; in mthca_resize_cq()
mthca_dev.h
494 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
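mthca_resize_cq() stores the usable entry count as ibcq->cqe = entries - 1. Because CQ rings in these drivers are sized to powers of two, that value doubles as an index mask, which is why the lookups above index with get_cqe(cq, n & cq->ibcq.cqe) and mlx4/mlx5 derive the ownership toggle from n & (cq->ibcq.cqe + 1). A small worked example of the arithmetic, assuming a ring of 8 entries for illustration:

#include <stdio.h>

int main(void)
{
	/* With entries = 8 (a power of two), cqe is stored as
	 * entries - 1 = 7: simultaneously the highest valid index
	 * and the wrap mask. */
	const unsigned int entries = 8;
	const unsigned int cqe = entries - 1;

	for (unsigned int n = 5; n < 12; n++) {
		unsigned int slot = n & cqe;           /* ring index, wraps at 8 */
		unsigned int lap  = !!(n & (cqe + 1)); /* flips each pass: ownership bit */
		printf("n=%2u -> slot=%u lap=%u\n", n, slot, lap);
	}
	return 0;
}

The lap bit flipping once per full traversal is what lets the hardware and driver agree on which CQEs are valid without a separate count field.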
/linux-4.1.27/drivers/infiniband/hw/mlx5/
cq.c
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp() local
43 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx5_ib_cq_comp()
49 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
50 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event() local
59 if (ibcq->event_handler) { in mlx5_ib_cq_event()
62 event.element.cq = ibcq; in mlx5_ib_cq_event()
63 ibcq->event_handler(&event, ibcq->cq_context); in mlx5_ib_cq_event()
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
406 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
[all …]
mlx5_ib.h
268 struct ib_cq ibcq; member
466 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
468 return container_of(ibcq, struct mlx5_ib_cq, ibcq); in to_mcq()
563 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
564 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
566 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
609 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_ev.c
89 event.device = chp->ibcq.device; in post_qp_event()
91 event.element.cq = &chp->ibcq; in post_qp_event()
99 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
178 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in iwch_ev_dispatch()
iwch_provider.h
102 struct ib_cq ibcq; member
112 static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq) in to_iwch_cq() argument
114 return container_of(ibcq, struct iwch_cq, ibcq); in to_iwch_cq()
334 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
iwch_qp.c
828 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in __flush_qp()
842 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); in __flush_qp()
864 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in flush_qp()
869 (*schp->ibcq.comp_handler)(&schp->ibcq, in flush_qp()
870 schp->ibcq.cq_context); in flush_qp()
iwch_cq.c
195 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in iwch_poll_cq() argument
203 chp = to_iwch_cq(ibcq); in iwch_poll_cq()
iwch_provider.c
191 chp->ibcq.cqe = 1 << chp->cq.size_log2; in iwch_create_cq()
207 iwch_destroy_cq(&chp->ibcq); in iwch_create_cq()
234 iwch_destroy_cq(&chp->ibcq); in iwch_create_cq()
242 return &chp->ibcq; in iwch_create_cq()
293 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1; in iwch_resize_cq()
313 static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) in iwch_arm_cq() argument
322 chp = to_iwch_cq(ibcq); in iwch_arm_cq()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
ev.c
105 event.device = chp->ibcq.device; in post_qp_event()
107 event.element.cq = &chp->ibcq; in post_qp_event()
114 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in c4iw_ev_handler()
qp.c
1143 (*rchp->ibcq.comp_handler)(&rchp->ibcq, in __flush_qp()
1144 rchp->ibcq.cq_context); in __flush_qp()
1150 (*rchp->ibcq.comp_handler)(&rchp->ibcq, in __flush_qp()
1151 rchp->ibcq.cq_context); in __flush_qp()
1156 (*schp->ibcq.comp_handler)(&schp->ibcq, in __flush_qp()
1157 schp->ibcq.cq_context); in __flush_qp()
1175 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in flush_qp()
1180 (*schp->ibcq.comp_handler)(&schp->ibcq, in flush_qp()
1181 schp->ibcq.cq_context); in flush_qp()
iw_cxgb4.h
425 struct ib_cq ibcq; member
434 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) in to_c4iw_cq() argument
436 return container_of(ibcq, struct c4iw_cq, ibcq); in to_c4iw_cq()
957 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1000 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
cq.c
828 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in c4iw_poll_cq() argument
835 chp = to_c4iw_cq(ibcq); in c4iw_poll_cq()
936 chp->ibcq.cqe = entries - 2; in c4iw_create_cq()
981 return &chp->ibcq; in c4iw_create_cq()
1001 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) in c4iw_arm_cq() argument
1007 chp = to_c4iw_cq(ibcq); in c4iw_arm_cq()
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_provider.h
92 struct ib_cq ibcq; member
153 static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq) in to_c2cq() argument
155 return container_of(ibcq, struct c2_cq, ibcq); in to_c2cq()
c2_cq.c
78 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in c2_cq_event()
201 int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in c2_poll_cq() argument
203 struct c2_dev *c2dev = to_c2dev(ibcq->device); in c2_poll_cq()
204 struct c2_cq *cq = to_c2cq(ibcq); in c2_poll_cq()
222 int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) in c2_arm_cq() argument
229 cq = to_c2cq(ibcq); in c2_arm_cq()
300 cq->ibcq.cqe = entries - 1; in c2_init_cq()
c2_ae.c
310 ib_event.element.cq = &cq->ibcq; in c2_ae_event()
313 if (cq->ibcq.event_handler) in c2_ae_event()
314 cq->ibcq.event_handler(&ib_event, in c2_ae_event()
315 cq->ibcq.cq_context); in c2_ae_event()
c2.h
517 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
518 extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
c2_provider.c
309 return &cq->ibcq; in c2_create_cq()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma.h
297 struct ib_cq ibcq; member
454 static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) in get_ocrdma_cq() argument
456 return container_of(ibcq, struct ocrdma_cq, ibcq); in get_ocrdma_cq()
ocrdma_verbs.c
1045 return &cq->ibcq; in ocrdma_create_cq()
1053 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, in ocrdma_resize_cq() argument
1057 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); in ocrdma_resize_cq()
1063 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1073 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); in ocrdma_flush_cq()
1093 int ocrdma_destroy_cq(struct ib_cq *ibcq) in ocrdma_destroy_cq() argument
1095 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); in ocrdma_destroy_cq()
1097 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); in ocrdma_destroy_cq()
2822 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); in ocrdma_poll_hwcq()
2908 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in ocrdma_poll_cq() argument
[all …]
ocrdma_hw.c
678 ib_evt.element.cq = &cq->ibcq; in ocrdma_dispatch_ibevent()
684 ib_evt.element.cq = &cq->ibcq; in ocrdma_dispatch_ibevent()
744 if (cq->ibcq.event_handler) in ocrdma_dispatch_ibevent()
745 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); in ocrdma_dispatch_ibevent()
893 if (bcq && bcq->ibcq.comp_handler) { in ocrdma_qp_buddy_cq_handler()
895 (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); in ocrdma_qp_buddy_cq_handler()
912 if (cq->ibcq.comp_handler) { in ocrdma_qp_cq_handler()
914 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in ocrdma_qp_cq_handler()
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_main.c
534 struct ib_cq *ibcq; in ehca_create_aqp1() local
544 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); in ehca_create_aqp1()
545 if (IS_ERR(ibcq)) { in ehca_create_aqp1()
547 return PTR_ERR(ibcq); in ehca_create_aqp1()
549 sport->ibcq_aqp1 = ibcq; in ehca_create_aqp1()
558 qp_init_attr.send_cq = ibcq; in ehca_create_aqp1()
559 qp_init_attr.recv_cq = ibcq; in ehca_create_aqp1()
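ehca_create_aqp1() creates the CQ for its AQP1 QP through the six-argument ib_create_cq() of this kernel generation: device, completion handler, async event handler, consumer context, minimum CQE count, and completion vector. A hedged fragment of the same call shape for a hypothetical driver-internal CQ with no handlers; this is in-kernel code, so it builds against rdma/ib_verbs.h rather than standing alone:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: allocate a small handler-less CQ the same way
 * ehca_create_aqp1() does above. */
static struct ib_cq *demo_internal_cq(struct ib_device *ibdev)
{
	struct ib_cq *ibcq;

	/* (device, comp_handler, event_handler, cq_context, cqe, comp_vector) */
	ibcq = ib_create_cq(ibdev, NULL, NULL, (void *)(-1), 10, 0);
	if (IS_ERR(ibcq))
		return ibcq; /* propagate the ERR_PTR to the caller */

	return ibcq;
}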
/linux-4.1.27/drivers/infiniband/hw/nes/
nes.h
505 static inline struct nes_cq *to_nescq(struct ib_cq *ibcq) in to_nescq() argument
507 return container_of(ibcq, struct nes_cq, ibcq); in to_nescq()
nes_verbs.h
112 struct ib_cq ibcq; member
nes_hw.c
3734 if (nescq->ibcq.event_handler) { in nes_process_iwarp_aeqe()
3735 ibevent.device = nescq->ibcq.device; in nes_process_iwarp_aeqe()
3737 ibevent.element.cq = &nescq->ibcq; in nes_process_iwarp_aeqe()
3738 nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context); in nes_process_iwarp_aeqe()
3763 if (nescq->ibcq.comp_handler) in nes_iwarp_ce_handler()
3764 nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context); in nes_iwarp_ce_handler()
nes_verbs.c
1571 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; in nes_create_cq()
1779 return &nescq->ibcq; in nes_create_cq()
3632 static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in nes_poll_cq() argument
3637 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); in nes_poll_cq()
3639 struct nes_cq *nescq = to_nescq(ibcq); in nes_poll_cq()
3807 static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) in nes_req_notify_cq() argument
3809 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); in nes_req_notify_cq()
3811 struct nes_cq *nescq = to_nescq(ibcq); in nes_req_notify_cq()
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_ib_verbs.h
67 int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
usnic_ib_verbs.c
746 int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries, in usnic_ib_poll_cq() argument