Lines matching refs: cq
42 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) in mlx4_ib_cq_comp() argument
44 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp()
48 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) in mlx4_ib_cq_event() argument
55 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event()
59 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
63 event.element.cq = ibcq; in mlx4_ib_cq_event()
73 static void *get_cqe(struct mlx4_ib_cq *cq, int n) in get_cqe() argument
75 return get_cqe_from_buf(&cq->buf, n); in get_cqe()
78 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) in get_sw_cqe() argument
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
81 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
87 static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq) in next_cqe_sw() argument
89 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
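A minimal standalone model of the software-ownership test that get_sw_cqe() performs above may help: the ring holds (ibcq.cqe + 1) entries, and the expected value of the hardware owner bit flips on every lap of the consumer index, so a stale CQE left over from the previous lap is never mistaken for a fresh one. The names below (cqe_is_sw_owned, ring_size, hw_owner_bit) are illustrative, not driver fields.

#include <stdbool.h>
#include <stdio.h>

/* ring_size must be a power of two, matching ibcq.cqe + 1 in the driver */
static bool cqe_is_sw_owned(unsigned int n, unsigned int ring_size,
			    bool hw_owner_bit)
{
	bool lap_parity = !!(n & ring_size);	/* flips once per wrap of n */

	/* get_sw_cqe() returns the CQE only when owner bit and parity agree */
	return hw_owner_bit == lap_parity;
}

int main(void)
{
	/* 256-entry ring: slot 0 on the first lap expects owner bit 0, the
	 * same slot on the second lap (n = 256) expects owner bit 1 */
	printf("%d %d\n", cqe_is_sw_owned(0, 256, false),
	       cqe_is_sw_owned(256, 256, true));
	return 0;
}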
92 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx4_ib_modify_cq() argument
94 struct mlx4_ib_cq *mcq = to_mcq(cq); in mlx4_ib_modify_cq()
95 struct mlx4_ib_dev *dev = to_mdev(cq->device); in mlx4_ib_modify_cq()
178 struct mlx4_ib_cq *cq; in mlx4_ib_create_cq() local
188 cq = kmalloc(sizeof *cq, GFP_KERNEL); in mlx4_ib_create_cq()
189 if (!cq) in mlx4_ib_create_cq()
193 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
194 mutex_init(&cq->resize_mutex); in mlx4_ib_create_cq()
195 spin_lock_init(&cq->lock); in mlx4_ib_create_cq()
196 cq->resize_buf = NULL; in mlx4_ib_create_cq()
197 cq->resize_umem = NULL; in mlx4_ib_create_cq()
198 cq->create_flags = attr->flags; in mlx4_ib_create_cq()
199 INIT_LIST_HEAD(&cq->send_qp_list); in mlx4_ib_create_cq()
200 INIT_LIST_HEAD(&cq->recv_qp_list); in mlx4_ib_create_cq()
210 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem, in mlx4_ib_create_cq()
216 &cq->db); in mlx4_ib_create_cq()
222 err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL); in mlx4_ib_create_cq()
226 cq->mcq.set_ci_db = cq->db.db; in mlx4_ib_create_cq()
227 cq->mcq.arm_db = cq->db.db + 1; in mlx4_ib_create_cq()
228 *cq->mcq.set_ci_db = 0; in mlx4_ib_create_cq()
229 *cq->mcq.arm_db = 0; in mlx4_ib_create_cq()
231 err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries); in mlx4_ib_create_cq()
241 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, in mlx4_ib_create_cq()
242 cq->db.dma, &cq->mcq, vector, 0, in mlx4_ib_create_cq()
243 !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION)); in mlx4_ib_create_cq()
248 cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; in mlx4_ib_create_cq()
250 cq->mcq.comp = mlx4_ib_cq_comp; in mlx4_ib_create_cq()
251 cq->mcq.event = mlx4_ib_cq_event; in mlx4_ib_create_cq()
254 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { in mlx4_ib_create_cq()
259 return &cq->ibcq; in mlx4_ib_create_cq()
263 mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); in mlx4_ib_create_cq()
266 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); in mlx4_ib_create_cq()
269 ib_umem_release(cq->umem); in mlx4_ib_create_cq()
271 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
275 mlx4_db_free(dev->dev, &cq->db); in mlx4_ib_create_cq()
278 kfree(cq); in mlx4_ib_create_cq()
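The doorbell wiring in the kernel-allocation path above (set_ci_db = db.db, arm_db = db.db + 1, both zeroed) amounts to two consecutive 32-bit words in one doorbell record: the first publishes the consumer index, the second holds the arm state. A tiny illustrative model with made-up names (byte-order handling omitted):

#include <stdint.h>

struct cq_doorbell_record {
	uint32_t set_ci;	/* consumer-index word, db.db[0] in the driver */
	uint32_t arm;		/* arm word, db.db[1] in the driver */
};

static void cq_doorbell_record_init(struct cq_doorbell_record *db)
{
	/* mirrors *cq->mcq.set_ci_db = 0 and *cq->mcq.arm_db = 0 above */
	db->set_ci = 0;
	db->arm = 0;
}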
283 static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, in mlx4_alloc_resize_buf() argument
288 if (cq->resize_buf) in mlx4_alloc_resize_buf()
291 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); in mlx4_alloc_resize_buf()
292 if (!cq->resize_buf) in mlx4_alloc_resize_buf()
295 err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); in mlx4_alloc_resize_buf()
297 kfree(cq->resize_buf); in mlx4_alloc_resize_buf()
298 cq->resize_buf = NULL; in mlx4_alloc_resize_buf()
302 cq->resize_buf->cqe = entries - 1; in mlx4_alloc_resize_buf()
307 static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, in mlx4_alloc_resize_umem() argument
313 if (cq->resize_umem) in mlx4_alloc_resize_umem()
319 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); in mlx4_alloc_resize_umem()
320 if (!cq->resize_buf) in mlx4_alloc_resize_umem()
323 err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf, in mlx4_alloc_resize_umem()
324 &cq->resize_umem, ucmd.buf_addr, entries); in mlx4_alloc_resize_umem()
326 kfree(cq->resize_buf); in mlx4_alloc_resize_umem()
327 cq->resize_buf = NULL; in mlx4_alloc_resize_umem()
331 cq->resize_buf->cqe = entries - 1; in mlx4_alloc_resize_umem()
336 static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) in mlx4_ib_get_outstanding_cqes() argument
340 i = cq->mcq.cons_index; in mlx4_ib_get_outstanding_cqes()
341 while (get_sw_cqe(cq, i)) in mlx4_ib_get_outstanding_cqes()
344 return i - cq->mcq.cons_index; in mlx4_ib_get_outstanding_cqes()
347 static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) in mlx4_ib_cq_resize_copy_cqes() argument
351 int cqe_size = cq->buf.entry_size; in mlx4_ib_cq_resize_copy_cqes()
354 i = cq->mcq.cons_index; in mlx4_ib_cq_resize_copy_cqes()
355 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
359 new_cqe = get_cqe_from_buf(&cq->resize_buf->buf, in mlx4_ib_cq_resize_copy_cqes()
360 (i + 1) & cq->resize_buf->cqe); in mlx4_ib_cq_resize_copy_cqes()
361 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); in mlx4_ib_cq_resize_copy_cqes()
365 (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0); in mlx4_ib_cq_resize_copy_cqes()
366 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
369 ++cq->mcq.cons_index; in mlx4_ib_cq_resize_copy_cqes()
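In the resize copy above, each outstanding CQE is copied one slot ahead in the resize buffer and its owner bit is re-stamped from the parity of that destination index against the new ring size, since the lap parity relative to the larger ring can differ from the old one. A simplified, self-contained model (types and names are illustrative only; real CQEs carry the opcode in the same byte as the owner bit):

#include <stdint.h>
#include <string.h>

struct demo_cqe {
	uint8_t payload[31];
	uint8_t owner;		/* stands in for the owner bit of owner_sr_opcode */
};

/* dst_mask is the new ring's mask, i.e. new_entries - 1 */
static void resize_copy_one(struct demo_cqe *dst_ring, unsigned int dst_mask,
			    const struct demo_cqe *src, unsigned int i)
{
	struct demo_cqe *dst = &dst_ring[(i + 1) & dst_mask];

	memcpy(dst, src, sizeof(*dst));
	/* owner bit follows the lap parity of the destination index in the new ring */
	dst->owner = ((i + 1) & (dst_mask + 1)) ? 1 : 0;
}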
375 struct mlx4_ib_cq *cq = to_mcq(ibcq); in mlx4_ib_resize_cq() local
380 mutex_lock(&cq->resize_mutex); in mlx4_ib_resize_cq()
398 err = mlx4_alloc_resize_umem(dev, cq, entries, udata); in mlx4_ib_resize_cq()
403 outst_cqe = mlx4_ib_get_outstanding_cqes(cq); in mlx4_ib_resize_cq()
409 err = mlx4_alloc_resize_buf(dev, cq, entries); in mlx4_ib_resize_cq()
414 mtt = cq->buf.mtt; in mlx4_ib_resize_cq()
416 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt); in mlx4_ib_resize_cq()
422 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
423 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
424 ib_umem_release(cq->umem); in mlx4_ib_resize_cq()
425 cq->umem = cq->resize_umem; in mlx4_ib_resize_cq()
427 kfree(cq->resize_buf); in mlx4_ib_resize_cq()
428 cq->resize_buf = NULL; in mlx4_ib_resize_cq()
429 cq->resize_umem = NULL; in mlx4_ib_resize_cq()
434 spin_lock_irq(&cq->lock); in mlx4_ib_resize_cq()
435 if (cq->resize_buf) { in mlx4_ib_resize_cq()
436 mlx4_ib_cq_resize_copy_cqes(cq); in mlx4_ib_resize_cq()
437 tmp_buf = cq->buf; in mlx4_ib_resize_cq()
438 tmp_cqe = cq->ibcq.cqe; in mlx4_ib_resize_cq()
439 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
440 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
442 kfree(cq->resize_buf); in mlx4_ib_resize_cq()
443 cq->resize_buf = NULL; in mlx4_ib_resize_cq()
445 spin_unlock_irq(&cq->lock); in mlx4_ib_resize_cq()
454 mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt); in mlx4_ib_resize_cq()
456 mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf, in mlx4_ib_resize_cq()
457 cq->resize_buf->cqe); in mlx4_ib_resize_cq()
459 kfree(cq->resize_buf); in mlx4_ib_resize_cq()
460 cq->resize_buf = NULL; in mlx4_ib_resize_cq()
462 if (cq->resize_umem) { in mlx4_ib_resize_cq()
463 ib_umem_release(cq->resize_umem); in mlx4_ib_resize_cq()
464 cq->resize_umem = NULL; in mlx4_ib_resize_cq()
468 mutex_unlock(&cq->resize_mutex); in mlx4_ib_resize_cq()
473 int mlx4_ib_destroy_cq(struct ib_cq *cq) in mlx4_ib_destroy_cq() argument
475 struct mlx4_ib_dev *dev = to_mdev(cq->device); in mlx4_ib_destroy_cq()
476 struct mlx4_ib_cq *mcq = to_mcq(cq); in mlx4_ib_destroy_cq()
481 if (cq->uobject) { in mlx4_ib_destroy_cq()
482 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); in mlx4_ib_destroy_cq()
485 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); in mlx4_ib_destroy_cq()
579 static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, in use_tunnel_data() argument
631 static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, in mlx4_ib_poll_sw_comp() argument
640 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { in mlx4_ib_poll_sw_comp()
646 list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { in mlx4_ib_poll_sw_comp()
656 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, in mlx4_ib_poll_one() argument
673 cqe = next_cqe_sw(cq); in mlx4_ib_poll_one()
677 if (cq->buf.entry_size == 64) in mlx4_ib_poll_one()
680 ++cq->mcq.cons_index; in mlx4_ib_poll_one()
700 if (cq->resize_buf) { in mlx4_ib_poll_one()
701 struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx4_ib_poll_one()
703 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_poll_one()
704 cq->buf = cq->resize_buf->buf; in mlx4_ib_poll_one()
705 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_poll_one()
707 kfree(cq->resize_buf); in mlx4_ib_poll_one()
708 cq->resize_buf = NULL; in mlx4_ib_poll_one()
721 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
725 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); in mlx4_ib_poll_one()
739 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
743 cq->mcq.cqn, srq_num); in mlx4_ib_poll_one()
855 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { in mlx4_ib_poll_one()
859 return use_tunnel_data(*cur_qp, cq, wc, tail, in mlx4_ib_poll_one()
893 struct mlx4_ib_cq *cq = to_mcq(ibcq); in mlx4_ib_poll_cq() local
898 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); in mlx4_ib_poll_cq()
900 spin_lock_irqsave(&cq->lock, flags); in mlx4_ib_poll_cq()
902 mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled); in mlx4_ib_poll_cq()
907 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); in mlx4_ib_poll_cq()
912 mlx4_cq_set_ci(&cq->mcq); in mlx4_ib_poll_cq()
915 spin_unlock_irqrestore(&cq->lock, flags); in mlx4_ib_poll_cq()
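The polling structure visible above is: take the CQ lock, drain up to num_entries software-owned CQEs, publish the consumer index once (the mlx4_cq_set_ci() step), then drop the lock. A self-contained userspace sketch of that shape, with illustrative stand-in types and a mutex in place of the driver's spinlock (this is not the driver's code):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct demo_cq {
	pthread_mutex_t lock;
	uint32_t cons_index;				/* software consumer index */
	volatile uint32_t *ci_db;			/* doorbell word read by "hardware" */
	int (*poll_one)(struct demo_cq *cq, void *wc);	/* 0 = got one, nonzero = empty */
};

static int poll_cq_sketch(struct demo_cq *cq, int num_entries,
			  void *wc_array, size_t wc_size)
{
	int npolled;

	pthread_mutex_lock(&cq->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (cq->poll_one(cq, (char *)wc_array + npolled * wc_size))
			break;			/* ring drained */
	}
	*cq->ci_db = cq->cons_index;		/* publish CI once, like mlx4_cq_set_ci() */
	pthread_mutex_unlock(&cq->lock);

	return npolled;
}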
934 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in __mlx4_ib_cq_clean() argument
940 int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0; in __mlx4_ib_cq_clean()
949 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index) in __mlx4_ib_cq_clean()
950 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx4_ib_cq_clean()
957 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx4_ib_cq_clean()
958 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
966 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
977 cq->mcq.cons_index += nfreed; in __mlx4_ib_cq_clean()
983 mlx4_cq_set_ci(&cq->mcq); in __mlx4_ib_cq_clean()
987 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in mlx4_ib_cq_clean() argument
989 spin_lock_irq(&cq->lock); in mlx4_ib_cq_clean()
990 __mlx4_ib_cq_clean(cq, qpn, srq); in mlx4_ib_cq_clean()
991 spin_unlock_irq(&cq->lock); in mlx4_ib_cq_clean()
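__mlx4_ib_cq_clean() above compacts the ring: it scans the outstanding CQEs backwards, drops those belonging to the QP being cleaned, slides the survivors towards the producer end by the number of dropped entries, and finally advances the consumer index by that count. A simplified model of that compaction, with illustrative types and the ownership-bit and SRQ handling omitted:

#include <stdint.h>

struct demo_cqe {
	uint32_t qpn;
	uint32_t payload;
};

static void cq_clean_sketch(struct demo_cqe *ring, uint32_t mask,
			    uint32_t *cons_index, uint32_t prod_index,
			    uint32_t qpn)
{
	uint32_t i = prod_index;
	uint32_t nfreed = 0;

	while ((int32_t)--i - (int32_t)*cons_index >= 0) {
		struct demo_cqe *cqe = &ring[i & mask];

		if (cqe->qpn == qpn)
			++nfreed;				/* discard this entry */
		else if (nfreed)
			ring[(i + nfreed) & mask] = *cqe;	/* slide towards producer */
	}
	*cons_index += nfreed;					/* skip the freed slots */
}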