Lines Matching refs:cq (completion-queue helper inlines from the Chelsio cxgb4 RDMA driver header t4.h)

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}
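
The CQ_ARMED bit records that a consumer asked for a completion notification: t4_arm_cq() below sets it, and the driver's event-delivery paths (outside this listing) use t4_clear_cq_armed() to test-and-clear it, so each arm yields at most one upcall.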

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	set_bit(CQ_ARMED, &cq->flags);
	while (cq->cidx_inc > CIDXINC_M) {
		/* flush index updates that exceed the CIDXINC field width */
		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
		      INGRESSQID_V(cq->cqid & cq->qid_mask);
		writel(val, cq->gts);
		cq->cidx_inc -= CIDXINC_M;
	}
	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
	      INGRESSQID_V(cq->cqid & cq->qid_mask);
	writel(val, cq->gts);
	cq->cidx_inc = 0;
	return 0;
}
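
For illustration, a minimal sketch of how a verbs-level req_notify handler might drive t4_arm_cq() under the CQ lock. The wrapper struct and lock name (struct c4iw_cq, chp->lock) are assumptions here, not part of the listing:

static int arm_cq_sketch(struct c4iw_cq *chp, enum ib_cq_notify_flags flags)
{
	unsigned long flag;
	int ret;

	spin_lock_irqsave(&chp->lock, flag);	/* serialize against pollers */
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}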

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	BUG_ON(cq->sw_in_use < 1);
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}
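
The software queue shadows the hardware ring: completions the driver must synthesize or defer (for flushed work requests, for example) are parked in sw_queue and drained ahead of hardware CQEs. A hypothetical helper showing the produce side; insert_sw_cqe_sketch() is illustrative, not from the driver:

static void insert_sw_cqe_sketch(struct t4_cq *cq, struct t4_cqe *cqe)
{
	cq->sw_queue[cq->sw_pidx] = *cqe;	/* copy into the next SW slot */
	t4_swcq_produce(cq);			/* advance sw_pidx and sw_in_use */
}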

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
		u32 val;

		/* return batched consumer-index credits to the hardware */
		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
		      INGRESSQID_V(cq->cqid & cq->qid_mask);
		writel(val, cq->gts);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;	/* wrapped: flip the generation bit */
	}
}
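
t4_hwcq_consume() batches consumer-index updates instead of ringing the GTS doorbell once per CQE: the pending count in cidx_inc is flushed when it reaches a sixteenth of the ring (size >> 4) or the CIDXINC field limit, whichever comes first. For a 1024-entry CQ that is one doorbell write per 64 consumed entries.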

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}
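
A CQE is valid when its generation bit matches cq->gen: hardware stamps each entry with the generation of the current pass over the ring, and cq->gen flips on every wrap (see t4_hwcq_consume() above), so entries left over from the previous pass fail the comparison.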

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
		BUG_ON(1);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
		/* Ensure CQE is flushed to memory */
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}
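
Overflow detection keys off the bits_type_ts snapshot that t4_hwcq_consume() saved for the last consumed slot: if the previous slot no longer matches the snapshot, the hardware has lapped the consumer and overwritten unpolled CQEs, so the CQ is marked in error and -EOVERFLOW is returned. The rmb() orders the validity check against the caller's subsequent reads of the CQE payload.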

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
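
t4_next_cqe() prefers the software queue, so deferred completions drain before new hardware ones. A minimal polling sketch, assuming a hypothetical process_cqe() consumer; the retire step must match the queue the CQE came from:

static int poll_one_sketch(struct t4_cq *cq)
{
	struct t4_cqe *cqe;
	int ret = t4_next_cqe(cq, &cqe);

	if (ret)
		return ret;		/* -ENODATA: nothing to poll */
	process_cqe(cqe);		/* hypothetical consumer */
	if (cq->sw_in_use)
		t4_swcq_consume(cq);	/* CQE came from the SW queue */
	else
		t4_hwcq_consume(cq);	/* CQE came from the HW ring */
	return 0;
}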

static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
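
Both error helpers address the slot one past the end of the CQE ring (cq->queue[cq->size]), where a struct t4_status_page lives; setting qp_err there marks the whole CQ in error for any poller that checks t4_cq_in_error() before touching the ring.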