Lines matching refs: cq
(References to the struct ipath_cq pointer in ipath_cq.c. The leading number on each entry is the line in that source file; "argument" and "local" mark the lines where cq is declared.)
48 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) in ipath_cq_enter() argument
55 spin_lock_irqsave(&cq->lock, flags); in ipath_cq_enter()
61 wc = cq->queue; in ipath_cq_enter()
63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter()
64 head = cq->ibcq.cqe; in ipath_cq_enter()
69 spin_unlock_irqrestore(&cq->lock, flags); in ipath_cq_enter()
70 if (cq->ibcq.event_handler) { in ipath_cq_enter()
73 ev.device = cq->ibcq.device; in ipath_cq_enter()
74 ev.element.cq = &cq->ibcq; in ipath_cq_enter()
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in ipath_cq_enter()
80 if (cq->ip) { in ipath_cq_enter()
101 if (cq->notify == IB_CQ_NEXT_COMP || in ipath_cq_enter()
102 (cq->notify == IB_CQ_SOLICITED && solicited)) { in ipath_cq_enter()
103 cq->notify = IB_CQ_NONE; in ipath_cq_enter()
104 cq->triggered++; in ipath_cq_enter()
109 tasklet_hi_schedule(&cq->comptask); in ipath_cq_enter()
112 spin_unlock_irqrestore(&cq->lock, flags); in ipath_cq_enter()
115 to_idev(cq->ibcq.device)->n_wqe_errs++; in ipath_cq_enter()
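The lines above are the producer side of the completion ring: ipath_cq_enter() clamps the (possibly user-mapped, hence untrusted) head index to cq->ibcq.cqe, wraps it to 0 past the last slot, and treats a collision with the tail as overflow, reporting IB_EVENT_CQ_ERR through the event handler instead of queueing the entry; a successful insert may then arm the completion tasklet. A minimal user-space model of that head arithmetic, under the assumption that the ring keeps cqe + 1 slots with one always left empty (cq_ring and cq_ring_push are illustrative names, not driver symbols):

#include <stdbool.h>
#include <stdint.h>

struct cq_ring {
	uint32_t head;  /* next slot the producer fills               */
	uint32_t tail;  /* next slot the consumer drains              */
	uint32_t cqe;   /* valid indices run 0..cqe, i.e. cqe+1 slots */
};

/* Advance head the way the listing shows; return false on overflow
 * (the path that raises IB_EVENT_CQ_ERR in the driver). */
static bool cq_ring_push(struct cq_ring *r)
{
	uint32_t head = r->head;
	uint32_t next;

	/* head may be writable from user space, so clamp it first */
	if (head >= r->cqe) {
		head = r->cqe;
		next = 0;
	} else {
		next = head + 1;
	}
	if (next == r->tail)
		return false;	/* full: one slot stays empty so full != empty */
	/* ... the work completion would be copied into slot 'head' here ... */
	r->head = next;
	return true;
}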
131 struct ipath_cq *cq = to_icq(ibcq); in ipath_poll_cq() local
138 if (cq->ip) { in ipath_poll_cq()
143 spin_lock_irqsave(&cq->lock, flags); in ipath_poll_cq()
145 wc = cq->queue; in ipath_poll_cq()
147 if (tail > (u32) cq->ibcq.cqe) in ipath_poll_cq()
148 tail = (u32) cq->ibcq.cqe; in ipath_poll_cq()
154 if (tail >= cq->ibcq.cqe) in ipath_poll_cq()
161 spin_unlock_irqrestore(&cq->lock, flags); in ipath_poll_cq()
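ipath_poll_cq() is the consumer side: it refuses to poll a user-mapped CQ (cq->ip set means the queue belongs to user space), clamps the tail the same way the head was clamped, and advances it with the same wrap-at-cqe rule. A hedged sketch of the drain loop, reusing the cq_ring type from the sketch above:

/* Drain up to 'num' entries; returns how many were consumed. */
static int cq_ring_poll(struct cq_ring *r, int num)
{
	uint32_t tail = r->tail;
	int npolled;

	/* tail may also live in a user-mapped page; clamp like the driver */
	if (tail > r->cqe)
		tail = r->cqe;
	for (npolled = 0; npolled < num; ++npolled) {
		if (tail == r->head)
			break;			/* ring is empty */
		/* ... entry at slot 'tail' would be copied out here ... */
		if (tail >= r->cqe)
			tail = 0;		/* wrap past the last slot */
		else
			tail++;
	}
	r->tail = tail;
	return npolled;
}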
169 struct ipath_cq *cq = (struct ipath_cq *)data; in send_complete() local
179 u8 triggered = cq->triggered; in send_complete()
181 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
183 if (cq->triggered == triggered) in send_complete()
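send_complete() is the tasklet body armed by ipath_cq_enter(): it snapshots cq->triggered, runs the consumer's completion handler, and loops if triggered moved while the handler ran, so a completion that arrives mid-callback is not lost between the handler returning and the tasklet exiting. The same re-check pattern in plain C (callback type and field names are illustrative; uses <stdint.h> as above):

struct cq_notify {
	uint8_t triggered;               /* bumped for every queued completion */
	void (*comp_handler)(void *ctx); /* consumer callback                  */
	void *ctx;
};

/* Keep invoking the handler until no new completion arrived while it ran. */
static void cq_send_complete(struct cq_notify *n)
{
	for (;;) {
		uint8_t snap = n->triggered;

		n->comp_handler(n->ctx);

		if (n->triggered == snap)
			return;	/* nothing new showed up during the callback */
	}
}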
205 struct ipath_cq *cq; in ipath_create_cq() local
216 cq = kmalloc(sizeof(*cq), GFP_KERNEL); in ipath_create_cq()
217 if (!cq) { in ipath_create_cq()
247 cq->ip = ipath_create_mmap_info(dev, sz, context, wc); in ipath_create_cq()
248 if (!cq->ip) { in ipath_create_cq()
253 err = ib_copy_to_udata(udata, &cq->ip->offset, in ipath_create_cq()
254 sizeof(cq->ip->offset)); in ipath_create_cq()
260 cq->ip = NULL; in ipath_create_cq()
272 if (cq->ip) { in ipath_create_cq()
274 list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); in ipath_create_cq()
283 cq->ibcq.cqe = entries; in ipath_create_cq()
284 cq->notify = IB_CQ_NONE; in ipath_create_cq()
285 cq->triggered = 0; in ipath_create_cq()
286 spin_lock_init(&cq->lock); in ipath_create_cq()
287 tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); in ipath_create_cq()
290 cq->queue = wc; in ipath_create_cq()
292 ret = &cq->ibcq; in ipath_create_cq()
297 kfree(cq->ip); in ipath_create_cq()
301 kfree(cq); in ipath_create_cq()
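Creation allocates the ipath_cq wrapper, a queue sized for the requested depth plus the spare slot, and, for user CQs, an ipath_mmap_info whose offset is copied back through udata so the library can mmap the queue; the remaining lines initialise notify, triggered, the lock and the completion tasklet before handing back &cq->ibcq. A rough, error-handling-free sketch of the sizing convention only (not the driver's allocation code):

#include <stdlib.h>

/* Report 'entries' CQEs while the ring itself keeps entries + 1 slots,
 * mirroring cq->ibcq.cqe = entries on line 283.  Illustrative only. */
static struct cq_ring *cq_ring_create(uint32_t entries)
{
	struct cq_ring *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->cqe = entries;	/* valid slot indices are 0 .. entries */
	/* the real driver also allocates the entry array here and, for
	 * user CQs, publishes it via ipath_create_mmap_info() + udata */
	return r;
}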
317 struct ipath_cq *cq = to_icq(ibcq); in ipath_destroy_cq() local
319 tasklet_kill(&cq->comptask); in ipath_destroy_cq()
323 if (cq->ip) in ipath_destroy_cq()
324 kref_put(&cq->ip->ref, ipath_release_mmap_info); in ipath_destroy_cq()
326 vfree(cq->queue); in ipath_destroy_cq()
327 kfree(cq); in ipath_destroy_cq()
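Teardown kills the completion tasklet before anything is freed, so no handler can run on a dead CQ; a user-visible queue is not freed directly but released with kref_put(), since user space may still hold the mapping, while a kernel queue is simply vfree'd. A bare-bones stand-in for that drop-the-last-reference pattern (plain counter instead of a struct kref, so only safe single-threaded; uses <stdlib.h> as above):

struct mmap_buf {
	unsigned int refs;
	void *pages;
};

static void mmap_buf_put(struct mmap_buf *b)
{
	if (--b->refs == 0) {	/* the driver uses atomic kref_put() */
		free(b->pages);
		free(b);
	}
}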
344 struct ipath_cq *cq = to_icq(ibcq); in ipath_req_notify_cq() local
348 spin_lock_irqsave(&cq->lock, flags); in ipath_req_notify_cq()
353 if (cq->notify != IB_CQ_NEXT_COMP) in ipath_req_notify_cq()
354 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; in ipath_req_notify_cq()
357 cq->queue->head != cq->queue->tail) in ipath_req_notify_cq()
360 spin_unlock_irqrestore(&cq->lock, flags); in ipath_req_notify_cq()
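Re-arming never downgrades IB_CQ_NEXT_COMP to IB_CQ_SOLICITED, since next-completion notification already covers solicited entries, and when IB_CQ_REPORT_MISSED_EVENTS is requested the call returns 1 if head != tail so the caller knows to poll before sleeping. The decision logic, restated with illustrative stand-ins for the IB_CQ_* constants:

enum cq_arm { CQ_ARM_NONE, CQ_ARM_SOLICITED, CQ_ARM_NEXT_COMP };

/* Returns 1 when missed-event reporting was asked for and entries are
 * already queued, matching lines 353 and 357 above. */
static int cq_req_notify(struct cq_ring *r, enum cq_arm *armed,
			 enum cq_arm want, bool report_missed)
{
	if (*armed != CQ_ARM_NEXT_COMP)	/* never weaken NEXT_COMP */
		*armed = want;

	return (report_missed && r->head != r->tail) ? 1 : 0;
}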
373 struct ipath_cq *cq = to_icq(ibcq); in ipath_resize_cq() local
408 spin_lock_irq(&cq->lock); in ipath_resize_cq()
413 old_wc = cq->queue; in ipath_resize_cq()
415 if (head > (u32) cq->ibcq.cqe) in ipath_resize_cq()
416 head = (u32) cq->ibcq.cqe; in ipath_resize_cq()
418 if (tail > (u32) cq->ibcq.cqe) in ipath_resize_cq()
419 tail = (u32) cq->ibcq.cqe; in ipath_resize_cq()
421 n = cq->ibcq.cqe + 1 + head - tail; in ipath_resize_cq()
429 if (cq->ip) in ipath_resize_cq()
433 if (tail == (u32) cq->ibcq.cqe) in ipath_resize_cq()
438 cq->ibcq.cqe = cqe; in ipath_resize_cq()
441 cq->queue = wc; in ipath_resize_cq()
442 spin_unlock_irq(&cq->lock); in ipath_resize_cq()
446 if (cq->ip) { in ipath_resize_cq()
448 struct ipath_mmap_info *ip = cq->ip; in ipath_resize_cq()
473 spin_unlock_irq(&cq->lock); in ipath_resize_cq()
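Resize takes the lock, clamps the old head and tail, and counts the entries still queued: head - tail normally, or cq->ibcq.cqe + 1 + head - tail (line 421) when head has wrapped below tail. If they do not fit the new depth the resize fails; otherwise they are copied to the front of the new queue, cq->ibcq.cqe becomes the new size, and head/tail restart at the copied count and 0. The occupancy arithmetic as a self-contained helper on the cq_ring model above:

/* Number of entries queued in the ring, with the wrap adjustment the
 * driver applies when head has passed the end and tail has not. */
static uint32_t cq_ring_count(const struct cq_ring *r)
{
	uint32_t head = r->head > r->cqe ? r->cqe : r->head;	/* clamp */
	uint32_t tail = r->tail > r->cqe ? r->cqe : r->tail;

	return head >= tail ? head - tail
			    : r->cqe + 1 + head - tail;
}

/* A resize can only succeed when the pending entries fit the new depth. */
static bool cq_ring_resize_fits(const struct cq_ring *r, uint32_t new_cqe)
{
	return cq_ring_count(r) <= new_cqe;
}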