Lines Matching refs:cq
39 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) in mlx5_ib_cq_comp() argument
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
48 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
49 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
50 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
62 event.element.cq = ibcq; in mlx5_ib_cq_event()
72 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
74 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); in get_cqe()
82 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
97 static void *next_cqe_sw(struct mlx5_ib_cq *cq) in next_cqe_sw() argument
99 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
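The three helpers above implement the ring's software/hardware ownership handshake. Below is a minimal, self-contained sketch (not driver code) of the parity test on line 90: the hardware flips the CQE ownership bit on each lap around the ring, and a CQE at index n is software-owned when its bit matches the wrap parity of the consumer index. The names cqe_sw_owned, OWNER_MASK and cqe_count are illustrative; in the driver they correspond to the expression in get_sw_cqe(), MLX5_CQE_OWNER_MASK and ibcq.cqe + 1.

#include <stdio.h>
#include <stdint.h>

#define OWNER_MASK 0x1  /* stands in for MLX5_CQE_OWNER_MASK */

/* Return nonzero when the CQE is software-owned. */
static int cqe_sw_owned(uint8_t op_own, uint32_t ci, uint32_t cqe_count)
{
	/* cqe_count is a power of two; (ci & cqe_count) extracts the wrap
	 * parity, since the driver keeps ibcq.cqe = cqe_count - 1. */
	return !((op_own & OWNER_MASK) ^ !!(ci & cqe_count));
}

int main(void)
{
	uint32_t cqe_count = 8; /* a ring of 8 entries */

	/* First lap (parity 0): hardware writes ownership bit 0. */
	printf("%d\n", cqe_sw_owned(0x0, 3, cqe_count));  /* 1: sw-owned */
	/* Second lap (parity 1): the bit must now be 1 to be valid. */
	printf("%d\n", cqe_sw_owned(0x0, 11, cqe_count)); /* 0: still hw */
	printf("%d\n", cqe_sw_owned(0x1, 11, cqe_count)); /* 1: sw-owned */
	return 0;
}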
402 static int mlx5_poll_one(struct mlx5_ib_cq *cq, in mlx5_poll_one() argument
406 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
421 cqe = next_cqe_sw(cq); in mlx5_poll_one()
425 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
427 ++cq->mcq.cons_index; in mlx5_poll_one()
436 if (likely(cq->resize_buf)) { in mlx5_poll_one()
437 free_cq_buf(dev, &cq->buf); in mlx5_poll_one()
438 cq->buf = *cq->resize_buf; in mlx5_poll_one()
439 kfree(cq->resize_buf); in mlx5_poll_one()
440 cq->resize_buf = NULL; in mlx5_poll_one()
456 cq->mcq.cqn, qpn); in mlx5_poll_one()
490 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one()
523 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); in mlx5_poll_one()
533 cq->mcq.cqn, mr->sig->err_item.key, in mlx5_poll_one()
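A pattern recurring throughout this listing (lines 87, 425, 881, 888) handles the two CQE strides mlx5 supports: with 128-byte CQEs the 64-byte completion format sits in the second half of the entry, so the driver reads cqe + 64. A hedged helper capturing that convention; the name cqe64_of is ours, not the driver's:

#include <stdint.h>
#include <stddef.h>

/* Return the 64-byte completion structure inside a raw CQE of size
 * cqe_sz (64 or 128).  Mirrors the ternary used throughout the file. */
static inline void *cqe64_of(void *cqe, size_t cqe_sz)
{
	return (cqe_sz == 64) ? cqe : (void *)((uint8_t *)cqe + 64);
}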
548 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_poll_cq() local
554 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_poll_cq()
557 err = mlx5_poll_one(cq, &cur_qp, wc + npolled); in mlx5_ib_poll_cq()
563 mlx5_cq_set_ci(&cq->mcq); in mlx5_ib_poll_cq()
565 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_poll_cq()
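mlx5_ib_poll_cq() is where a consumer's poll call lands: it takes the CQ lock, calls mlx5_poll_one() up to num_entries times, then publishes the new consumer index with mlx5_cq_set_ci(). A hedged sketch of the calling side, using the userspace libibverbs analogue (ibv_poll_cq) and assuming a CQ the caller already created; the batch size of 16 is arbitrary:

#include <infiniband/verbs.h>
#include <stdio.h>

static void drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n;

	/* Poll in batches until the CQ is empty; n < 0 indicates an error. */
	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (int i = 0; i < n; i++) {
			if (wc[i].status != IBV_WC_SUCCESS)
				fprintf(stderr, "wr_id %llu failed: %s\n",
					(unsigned long long)wc[i].wr_id,
					ibv_wc_status_str(wc[i].status));
		}
	}
}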
605 struct ib_ucontext *context, struct mlx5_ib_cq *cq, in create_cq_user() argument
633 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, in create_cq_user()
636 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
637 err = PTR_ERR(cq->buf.umem); in create_cq_user()
642 &cq->db); in create_cq_user()
646 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
657 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); in create_cq_user()
665 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in create_cq_user()
668 ib_umem_release(cq->buf.umem); in create_cq_user()
672 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) in destroy_cq_user() argument
674 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in destroy_cq_user()
675 ib_umem_release(cq->buf.umem); in destroy_cq_user()
678 static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) in init_cq_buf() argument
691 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in create_cq_kernel() argument
698 err = mlx5_db_alloc(dev->mdev, &cq->db); in create_cq_kernel()
702 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
703 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
704 cq->mcq.cqe_sz = cqe_size; in create_cq_kernel()
706 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); in create_cq_kernel()
710 init_cq_buf(cq, &cq->buf); in create_cq_kernel()
712 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; in create_cq_kernel()
718 mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); in create_cq_kernel()
720 (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; in create_cq_kernel()
726 free_cq_buf(dev, &cq->buf); in create_cq_kernel()
729 mlx5_db_free(dev->mdev, &cq->db); in create_cq_kernel()
733 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in destroy_cq_kernel() argument
735 free_cq_buf(dev, &cq->buf); in destroy_cq_kernel()
736 mlx5_db_free(dev->mdev, &cq->db); in destroy_cq_kernel()
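Lines 702-703 show that a kernel CQ's doorbell record is one DMA-coherent allocation holding two consecutive 32-bit words: the consumer index, with the arm word directly after it. A sketch of that implied layout; the struct and field names are ours (the driver itself only uses db.db and db.db + 1):

#include <stdint.h>

struct cq_doorbell_record {        /* illustrative, not a driver type */
	uint32_t set_ci; /* big-endian consumer index, written by mlx5_cq_set_ci() */
	uint32_t arm;    /* big-endian arm state, written when requesting notification */
};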
745 struct mlx5_ib_cq *cq; in mlx5_ib_create_cq() local
760 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx5_ib_create_cq()
761 if (!cq) in mlx5_ib_create_cq()
764 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
765 mutex_init(&cq->resize_mutex); in mlx5_ib_create_cq()
766 spin_lock_init(&cq->lock); in mlx5_ib_create_cq()
767 cq->resize_buf = NULL; in mlx5_ib_create_cq()
768 cq->resize_umem = NULL; in mlx5_ib_create_cq()
771 err = create_cq_user(dev, udata, context, cq, entries, in mlx5_ib_create_cq()
778 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, in mlx5_ib_create_cq()
784 cq->cqe_size = cqe_size; in mlx5_ib_create_cq()
792 cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); in mlx5_ib_create_cq()
794 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); in mlx5_ib_create_cq()
798 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); in mlx5_ib_create_cq()
799 cq->mcq.irqn = irqn; in mlx5_ib_create_cq()
800 cq->mcq.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
801 cq->mcq.event = mlx5_ib_cq_event; in mlx5_ib_create_cq()
804 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { in mlx5_ib_create_cq()
811 return &cq->ibcq; in mlx5_ib_create_cq()
814 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); in mlx5_ib_create_cq()
819 destroy_cq_user(cq, context); in mlx5_ib_create_cq()
821 destroy_cq_kernel(dev, cq); in mlx5_ib_create_cq()
824 kfree(cq); in mlx5_ib_create_cq()
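mlx5_ib_create_cq() allocates the mlx5_ib_cq, builds the user or kernel buffer, issues the firmware create command via mlx5_core_create_cq(), and unwinds in reverse order on failure. A hedged sketch of how userspace reaches it through libibverbs; 'ctx' is assumed to be an opened device context and the entry count is arbitrary:

#include <infiniband/verbs.h>
#include <stdio.h>

static struct ibv_cq *make_cq(struct ibv_context *ctx)
{
	/* Ask for 256 entries.  Note line 764 above: the kernel side keeps
	 * entries - 1 in ibcq.cqe; providers may round the request up. */
	struct ibv_cq *cq = ibv_create_cq(ctx, 256, NULL, NULL, 0);

	if (!cq)
		fprintf(stderr, "ibv_create_cq failed\n");
	return cq;
}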
830 int mlx5_ib_destroy_cq(struct ib_cq *cq) in mlx5_ib_destroy_cq() argument
832 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
833 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_destroy_cq()
836 if (cq->uobject) in mlx5_ib_destroy_cq()
837 context = cq->uobject->context; in mlx5_ib_destroy_cq()
855 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) in __mlx5_ib_cq_clean() argument
863 if (!cq) in __mlx5_ib_cq_clean()
872 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) in __mlx5_ib_cq_clean()
873 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
879 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx5_ib_cq_clean()
880 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
881 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
887 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
888 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; in __mlx5_ib_cq_clean()
890 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
897 cq->mcq.cons_index += nfreed; in __mlx5_ib_cq_clean()
902 mlx5_cq_set_ci(&cq->mcq); in __mlx5_ib_cq_clean()
906 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
908 if (!cq) in mlx5_ib_cq_clean()
911 spin_lock_irq(&cq->lock); in mlx5_ib_cq_clean()
912 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
913 spin_unlock_irq(&cq->lock); in mlx5_ib_cq_clean()
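__mlx5_ib_cq_clean() runs when a QP or SRQ is destroyed: it walks backwards from the newest software-owned CQE to the consumer index, drops completions belonging to the condemned rsn, slides survivors toward the producer side (fixing their ownership bits), and advances cons_index past the collapsed hole. A self-contained toy model of that compaction, with integers standing in for CQEs and each value acting as the owning QPN (all names and numbers here are illustrative):

#include <stdio.h>

#define RING 8

/* entries[i & (RING - 1)] models the CQE ring; the value is the QPN. */
static int entries[RING] = {7, 7, 5, 7, 5, 5, 0, 0};

int main(void)
{
	unsigned int cons = 0, prod = 6; /* entries [cons, prod) are valid */
	unsigned int nfreed = 0;

	for (int i = (int)prod - 1; i >= (int)cons; i--) {
		if (entries[i & (RING - 1)] == 5) {
			nfreed++; /* drop CQEs of the condemned QP (qpn 5) */
		} else if (nfreed) {
			/* slide the survivor up by the number of freed slots */
			entries[(i + nfreed) & (RING - 1)] =
				entries[i & (RING - 1)];
		}
	}
	cons += nfreed; /* consumer index skips the collapsed hole */
	printf("cons=%u nfreed=%u\n", cons, nfreed); /* cons=3 nfreed=3 */
	return 0;
}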
916 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx5_ib_modify_cq() argument
919 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
920 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_modify_cq()
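mlx5_ib_modify_cq() implements completion moderation: the completion event is deferred until cq_count completions have arrived or cq_period microseconds have elapsed, whichever comes first, which batches interrupts under load. Current rdma-core exposes this to userspace as ibv_modify_cq(); a hedged sketch under that assumption (the values are arbitrary):

#include <infiniband/verbs.h>

static int moderate_cq(struct ibv_cq *cq)
{
	struct ibv_modify_cq_attr attr = {
		.attr_mask = IBV_CQ_ATTR_MODERATE,
		/* fire an event after 16 completions or 10 us, whichever
		 * comes first */
		.moderate  = { .cq_count = 16, .cq_period = 10 },
	};

	return ibv_modify_cq(cq, &attr);
}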
945 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_user() argument
953 struct ib_ucontext *context = cq->buf.umem->context; in resize_user()
972 cq->resize_umem = umem; in resize_user()
978 static void un_resize_user(struct mlx5_ib_cq *cq) in un_resize_user() argument
980 ib_umem_release(cq->resize_umem); in un_resize_user()
983 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_kernel() argument
988 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); in resize_kernel()
989 if (!cq->resize_buf) in resize_kernel()
992 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); in resize_kernel()
996 init_cq_buf(cq, cq->resize_buf); in resize_kernel()
1001 kfree(cq->resize_buf); in resize_kernel()
1005 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in un_resize_kernel() argument
1007 free_cq_buf(dev, cq->resize_buf); in un_resize_kernel()
1008 cq->resize_buf = NULL; in un_resize_kernel()
1011 static int copy_resize_cqes(struct mlx5_ib_cq *cq) in copy_resize_cqes() argument
1013 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
1024 ssize = cq->buf.cqe_size; in copy_resize_cqes()
1025 dsize = cq->resize_buf->cqe_size; in copy_resize_cqes()
1031 i = cq->mcq.cons_index; in copy_resize_cqes()
1032 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1041 dcqe = get_cqe_from_buf(cq->resize_buf, in copy_resize_cqes()
1042 (i + 1) & (cq->resize_buf->nent), in copy_resize_cqes()
1045 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
1050 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1059 cq->mcq.cqn); in copy_resize_cqes()
1063 ++cq->mcq.cons_index; in copy_resize_cqes()
1070 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_resize_cq() local
1094 mutex_lock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1096 err = resize_user(dev, cq, entries, udata, &npas, &page_shift, in mlx5_ib_resize_cq()
1100 err = resize_kernel(dev, cq, entries, cqe_size); in mlx5_ib_resize_cq()
1102 npas = cq->resize_buf->buf.npages; in mlx5_ib_resize_cq()
1103 page_shift = cq->resize_buf->buf.page_shift; in mlx5_ib_resize_cq()
1118 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, in mlx5_ib_resize_cq()
1121 mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); in mlx5_ib_resize_cq()
1131 in->cqn = cpu_to_be32(cq->mcq.cqn); in mlx5_ib_resize_cq()
1133 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); in mlx5_ib_resize_cq()
1138 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1139 ib_umem_release(cq->buf.umem); in mlx5_ib_resize_cq()
1140 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
1141 cq->resize_umem = NULL; in mlx5_ib_resize_cq()
1146 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_resize_cq()
1147 if (cq->resize_buf) { in mlx5_ib_resize_cq()
1148 err = copy_resize_cqes(cq); in mlx5_ib_resize_cq()
1150 tbuf = cq->buf; in mlx5_ib_resize_cq()
1151 cq->buf = *cq->resize_buf; in mlx5_ib_resize_cq()
1152 kfree(cq->resize_buf); in mlx5_ib_resize_cq()
1153 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1157 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1158 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_resize_cq()
1162 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1172 un_resize_user(cq); in mlx5_ib_resize_cq()
1174 un_resize_kernel(dev, cq); in mlx5_ib_resize_cq()
1176 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
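mlx5_ib_resize_cq() stages the new buffer (user or kernel), issues the resize through mlx5_core_modify_cq() (line 1133), and for kernel CQs migrates still-valid CQEs with copy_resize_cqes() under the CQ lock before swapping buffers. A hedged sketch of the userspace entry point, assuming an existing CQ; the new depth of 1024 is arbitrary:

#include <infiniband/verbs.h>
#include <stdio.h>

static int grow_cq(struct ibv_cq *cq)
{
	int err = ibv_resize_cq(cq, 1024);

	if (err)
		fprintf(stderr, "ibv_resize_cq: %d\n", err);
	/* On success cq->cqe reflects the new usable depth
	 * (entries - 1 on the kernel side, line 1138 above). */
	return err;
}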
1182 struct mlx5_ib_cq *cq; in mlx5_ib_get_cqe_size() local
1187 cq = to_mcq(ibcq); in mlx5_ib_get_cqe_size()
1188 return cq->cqe_size; in mlx5_ib_get_cqe_size()