Lines Matching refs:cq
48 struct c2_cq *cq; in c2_cq_get() local
52 cq = c2dev->qptr_array[cqn]; in c2_cq_get()
53 if (!cq) { in c2_cq_get()
57 atomic_inc(&cq->refcount); in c2_cq_get()
59 return cq; in c2_cq_get()
62 static void c2_cq_put(struct c2_cq *cq) in c2_cq_put() argument
64 if (atomic_dec_and_test(&cq->refcount)) in c2_cq_put()
65 wake_up(&cq->wait); in c2_cq_put()
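The first two groups of hits are the CQ reference-counting helpers: c2_cq_get() looks a CQ up in c2dev->qptr_array by its message-queue index and pins it, and c2_cq_put() drops the pin, waking the waiter that c2_free_cq() (further down) parks on cq->wait. A minimal sketch of the pair, using only the fields the listing itself references (qptr_array, refcount, wait); the device-level lock name is an assumption:

static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	/* Look the CQ up by message-queue index and pin it. */
	spin_lock_irqsave(&c2dev->lock, flags);		/* lock name assumed */
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}

static void c2_cq_put(struct c2_cq *cq)
{
	/* Last reference dropped: wake c2_free_cq(), which sleeps on cq->wait. */
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}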
70 struct c2_cq *cq; in c2_cq_event() local
72 cq = c2_cq_get(c2dev, mq_index); in c2_cq_event()
73 if (!cq) { in c2_cq_event()
78 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in c2_cq_event()
79 c2_cq_put(cq); in c2_cq_event()
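c2_cq_event() is the dispatch path for a completion event on a given message queue: pin the CQ, call the consumer's completion handler through the embedded ib_cq, unpin. A sketch under the same assumptions as above:

void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		/* The event raced with CQ teardown; nothing to deliver. */
		return;
	}

	/* Hand the notification to the consumer's completion handler. */
	(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}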
84 struct c2_cq *cq; in c2_cq_clean() local
87 cq = c2_cq_get(c2dev, mq_index); in c2_cq_clean()
88 if (!cq) in c2_cq_clean()
91 spin_lock_irq(&cq->lock); in c2_cq_clean()
92 q = &cq->mq; in c2_cq_clean()
106 spin_unlock_irq(&cq->lock); in c2_cq_clean()
107 c2_cq_put(cq); in c2_cq_clean()
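c2_cq_clean() takes the CQ lock and walks its message queue, typically to discard completions left behind by a QP that is being destroyed. The listing only shows the get/put pair and the lock/unlock bracket, so the scan itself stays a comment in this sketch; the qp parameter is an assumption:

void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;

	/*
	 * Scan the message queue and discard completions that belong to
	 * the QP being torn down (the loop body is not shown in the
	 * listing above).
	 */

	spin_unlock_irq(&cq->lock);
	c2_cq_put(cq);
}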
132 struct c2_cq *cq, struct ib_wc *entry) in c2_poll_one() argument
138 ce = c2_mq_consume(&cq->mq); in c2_poll_one()
150 c2_mq_free(&cq->mq); in c2_poll_one()
151 ce = c2_mq_consume(&cq->mq); in c2_poll_one()
196 c2_mq_free(&cq->mq); in c2_poll_one()
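c2_poll_one() pulls a single completion off cq->mq: consume a slot, drop stale entries by freeing the slot and consuming the next one (the free-then-consume pair at lines 150-151), translate the entry into the caller's ib_wc, and release the slot. A sketch; the c2wr_ce type name and the -EAGAIN return for an empty queue are assumptions, and the translation itself is elided:

static int c2_poll_one(struct c2_dev *c2dev,
		       struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;			/* completion-entry type (name assumed) */

	ce = c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;			/* queue empty (errno assumed) */

	/*
	 * A stale entry (its QP already destroyed) is dropped by freeing
	 * the slot and consuming the next one -- the pattern at lines
	 * 150-151 above.
	 */

	/* ... fill *entry (wr_id, status, opcode, byte_len, ...) from *ce ... */

	c2_mq_free(&cq->mq);			/* hand the slot back to the MQ */
	return 0;
}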
204 struct c2_cq *cq = to_c2cq(ibcq); in c2_poll_cq() local
208 spin_lock_irqsave(&cq->lock, flags); in c2_poll_cq()
212 err = c2_poll_one(c2dev, cq, entry + npolled); in c2_poll_cq()
217 spin_unlock_irqrestore(&cq->lock, flags); in c2_poll_cq()
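c2_poll_cq() is the ib_poll_cq verb: it serialises pollers on cq->lock and calls c2_poll_one() repeatedly until either num_entries completions have been returned or the queue runs dry. A sketch (the to_c2dev() conversion is an assumption):

int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);	/* conversion assumed */
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	/* Drain up to num_entries completions into the caller's array. */
	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}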
225 struct c2_cq *cq; in c2_arm_cq() local
229 cq = to_c2cq(ibcq); in c2_arm_cq()
230 shared = cq->mq.peer; in c2_arm_cq()
249 spin_lock_irqsave(&cq->lock, flags); in c2_arm_cq()
250 ret = !c2_mq_empty(&cq->mq); in c2_arm_cq()
251 spin_unlock_irqrestore(&cq->lock, flags); in c2_arm_cq()
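c2_arm_cq() implements req_notify_cq: it writes the requested notification type into the adapter-visible shared area reached through cq->mq.peer and, when IB_CQ_REPORT_MISSED_EVENTS is requested, reports under cq->lock whether completions are already queued. The shared-area register writes are not in the listing and stay a comment here; the c2_mq_shared type name is an assumption:

int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify)
{
	struct c2_cq *cq;
	struct c2_mq_shared __iomem *shared;	/* type name assumed */
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	/*
	 * Write the requested notification type (next vs. solicited-only)
	 * into the adapter-visible shared area; the register writes are
	 * not part of the listing and are elided here.
	 */

	if (notify & IB_CQ_REPORT_MISSED_EVENTS) {
		/* Tell the caller whether completions were already queued. */
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}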
290 struct c2_ucontext *ctx, struct c2_cq *cq) in c2_init_cq() argument
300 cq->ibcq.cqe = entries - 1; in c2_init_cq()
301 cq->is_kernel = !ctx; in c2_init_cq()
304 cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_init_cq()
305 &cq->mq.shared_dma, GFP_KERNEL); in c2_init_cq()
306 if (!cq->mq.shared) in c2_init_cq()
310 err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE); in c2_init_cq()
324 wr.msg_size = cpu_to_be32(cq->mq.msg_size); in c2_init_cq()
325 wr.depth = cpu_to_be32(cq->mq.q_size); in c2_init_cq()
326 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma); in c2_init_cq()
327 wr.msg_pool = cpu_to_be64(cq->mq.host_dma); in c2_init_cq()
328 wr.user_context = (u64) (unsigned long) (cq); in c2_init_cq()
351 cq->adapter_handle = reply->cq_handle; in c2_init_cq()
352 cq->mq.index = be32_to_cpu(reply->mq_index); in c2_init_cq()
355 cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE); in c2_init_cq()
356 if (!cq->mq.peer) { in c2_init_cq()
364 spin_lock_init(&cq->lock); in c2_init_cq()
365 atomic_set(&cq->refcount, 1); in c2_init_cq()
366 init_waitqueue_head(&cq->wait); in c2_init_cq()
372 cq->cqn = cq->mq.index; in c2_init_cq()
373 c2dev->qptr_array[cq->cqn] = cq; in c2_init_cq()
382 c2_free_cq_buf(c2dev, &cq->mq); in c2_init_cq()
384 c2_free_mqsp(cq->mq.shared); in c2_init_cq()
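c2_init_cq() builds the host side of the CQ and asks the adapter to create its half: reserve a shared doorbell pointer, allocate the host message-queue buffer, fill in a CQ_CREATE work request (sizes, depth, DMA addresses, a user_context pointing back at the cq), round-trip it on the verbs queue, record the returned adapter handle and MQ index, map the adapter's peer page, and finally publish the CQ in qptr_array so c2_cq_get() can find it. In the sketch below the verbs-queue round trip is collapsed into a hypothetical helper (c2_cq_create_roundtrip() is not a real call), and the request/reply struct names and the peer_pa computation are assumptions:

int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;		/* request struct name assumed */
	struct c2wr_cq_create_rep *reply;	/* reply struct name assumed */
	unsigned long peer_pa;
	int err;

	cq->ibcq.cqe = entries - 1;		/* CQEs reported to the consumer */
	cq->is_kernel = !ctx;

	/* Shared doorbell pointer from the kernel MQ shared-pointer pool. */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Host-memory message queue that will hold the completion entries. */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail_mqsp;

	/* Describe the new CQ to the adapter (other wr fields elided). */
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	/* Hypothetical stand-in for the driver's verbs-queue round trip. */
	reply = c2_cq_create_roundtrip(c2dev, &wr);
	if (!reply) {
		err = -EIO;
		goto bail_buf;
	}

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	/* Map the adapter-side (peer) half of the message queue. */
	peer_pa = c2dev->pa + be32_to_cpu(reply->mq_shared);	/* field names assumed */
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail_buf;
	}

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);		/* dropped again in c2_free_cq() */
	init_waitqueue_head(&cq->wait);

	/* Publish the CQ so c2_cq_get() can find it by MQ index. */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail_buf:
	c2_free_cq_buf(c2dev, &cq->mq);
bail_mqsp:
	c2_free_mqsp(cq->mq.shared);
	return err;
}

The error labels mirror the frees at lines 382 and 384 of the listing, releasing the allocations in reverse order.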
389 void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq) in c2_free_cq() argument
400 c2dev->qptr_array[cq->mq.index] = NULL; in c2_free_cq()
401 atomic_dec(&cq->refcount); in c2_free_cq()
404 wait_event(cq->wait, !atomic_read(&cq->refcount)); in c2_free_cq()
415 wr.cq_handle = cq->adapter_handle; in c2_free_cq()
435 if (cq->is_kernel) { in c2_free_cq()
436 c2_free_cq_buf(c2dev, &cq->mq); in c2_free_cq()
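c2_free_cq() is the teardown mirror of the above: unpublish the CQ from qptr_array, drop the initial reference taken in c2_init_cq(), wait on cq->wait until every transient user from c2_cq_get() has called c2_cq_put(), ask the adapter to destroy its side using the stored adapter_handle, and free the host CQE buffer for kernel-owned CQs. A sketch, with the verbs-queue round trip and device-level locking elided (the c2wr_cq_destroy_req name is an assumption):

void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	struct c2wr_cq_destroy_req wr;		/* request struct name assumed */

	/* Unpublish the CQ so no new reference can be taken ... */
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);		/* drop the reference from c2_init_cq() */

	/* ... then wait for transient users (c2_cq_event() etc.) to finish. */
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	/* Ask the adapter to destroy its half (verbs-queue round trip elided). */
	wr.cq_handle = cq->adapter_handle;

	/* Kernel-owned CQs also own the host CQE buffer; release it last. */
	if (cq->is_kernel)
		c2_free_cq_buf(c2dev, &cq->mq);
}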