Lines Matching refs:cq
40 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) in mlx5_ib_cq_comp() argument
42 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
49 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
50 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
51 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
63 event.element.cq = ibcq; in mlx5_ib_cq_event()
73 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
75 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); in get_cqe()
83 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
85 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
88 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
91 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
98 static void *next_cqe_sw(struct mlx5_ib_cq *cq) in next_cqe_sw() argument
100 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
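
The ownership test on line 91 is the heart of the poll path: line 777 stores entries - 1 in ibcq.cqe, so ibcq.cqe + 1 is the ring size (a power of two), the low bits of the consumer index pick the slot (line 85), and n & (ibcq.cqe + 1) gives the wrap parity. A CQE is treated as software-owned when its owner bit equals that parity. Below is a minimal, self-contained sketch of the same test in plain user-space C; the names are hypothetical stand-ins, not the driver's.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_MASK 0x1          /* stands in for MLX5_CQE_OWNER_MASK */

    /* nent is the ring size (power of two); ci is the free-running
     * consumer index.  Mirrors the check on line 91:
     *   slot   = ci & (nent - 1)   (line 85)
     *   parity = !!(ci & nent)
     *   the CQE is software-owned when owner bit == parity */
    static bool cqe_sw_owned(uint8_t op_own, uint32_t ci, uint32_t nent)
    {
            return !((op_own & OWNER_MASK) ^ !!(ci & nent));
    }

    int main(void)
    {
            /* owner bit 0, index 5 of a 16-entry ring: parity 0, so the
             * test reports software ownership */
            printf("%d\n", cqe_sw_owned(0x0, 5, 16));   /* prints 1 */
            /* same owner bit one wrap later (ci = 21): parity 1, so the
             * test reports the CQE as still hardware-owned */
            printf("%d\n", cqe_sw_owned(0x0, 21, 16));  /* prints 0 */
            return 0;
    }
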
410 static int mlx5_poll_one(struct mlx5_ib_cq *cq, in mlx5_poll_one() argument
414 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
429 cqe = next_cqe_sw(cq); in mlx5_poll_one()
433 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
435 ++cq->mcq.cons_index; in mlx5_poll_one()
444 if (likely(cq->resize_buf)) { in mlx5_poll_one()
445 free_cq_buf(dev, &cq->buf); in mlx5_poll_one()
446 cq->buf = *cq->resize_buf; in mlx5_poll_one()
447 kfree(cq->resize_buf); in mlx5_poll_one()
448 cq->resize_buf = NULL; in mlx5_poll_one()
464 cq->mcq.cqn, qpn); in mlx5_poll_one()
498 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one()
531 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); in mlx5_poll_one()
541 cq->mcq.cqn, mr->sig->err_item.key, in mlx5_poll_one()
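
Lines 88, 433, 894 and 901 all apply the same offset: the CQ supports 64- and 128-byte CQE strides, and with the 128-byte stride the 64-byte completion format that software parses sits in the second half of each slot. A small self-contained sketch of that addressing (toy helper name, not driver code):

    #include <stdio.h>
    #include <string.h>

    /* Return the 64-byte completion entry inside slot 'n' of a CQ buffer
     * whose stride is either 64 or 128 bytes (mirrors lines 88 and 433). */
    static void *cqe64_of(void *buf, int n, int cqe_sz)
    {
            void *slot = (char *)buf + n * cqe_sz;

            return cqe_sz == 64 ? slot : (char *)slot + 64;
    }

    int main(void)
    {
            char buf[4 * 128];

            memset(buf, 0, sizeof(buf));
            /* 128-byte stride: slot 1 starts at 128, its completion
             * format starts at 128 + 64 = 192 */
            printf("%td\n", (char *)cqe64_of(buf, 1, 128) - buf);  /* 192 */
            printf("%td\n", (char *)cqe64_of(buf, 1, 64) - buf);   /*  64 */
            return 0;
    }
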
556 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_poll_cq() local
562 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_poll_cq()
565 err = mlx5_poll_one(cq, &cur_qp, wc + npolled); in mlx5_ib_poll_cq()
571 mlx5_cq_set_ci(&cq->mcq); in mlx5_ib_poll_cq()
573 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_poll_cq()
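
Lines 556-573 show the usual shape of the poll handler: take cq->lock with interrupts saved, consume completions one at a time up to num_entries, then publish the consumer index once and drop the lock. The toy model below restates that control flow in self-contained user-space C; the names are invented and the doorbell is just a variable standing in for what mlx5_cq_set_ci() writes on line 571.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_cq {
            uint32_t cons_index;
            uint32_t pending;       /* completions "hardware" has posted  */
            uint32_t ci_doorbell;   /* stands in for the set-CI doorbell  */
    };

    static int toy_poll_one(struct toy_cq *cq)
    {
            if (!cq->pending)
                    return -1;      /* ring empty, like next_cqe_sw() == NULL */
            cq->pending--;
            cq->cons_index++;       /* line 435 */
            return 0;
    }

    static int toy_poll_cq(struct toy_cq *cq, int num_entries)
    {
            int npolled;

            /* the real handler holds cq->lock (irqsave) around this loop */
            for (npolled = 0; npolled < num_entries; npolled++)
                    if (toy_poll_one(cq))
                            break;

            cq->ci_doorbell = cq->cons_index;  /* one update, like line 571 */
            return npolled;
    }

    int main(void)
    {
            struct toy_cq cq = { .cons_index = 0, .pending = 3 };
            int n = toy_poll_cq(&cq, 8);

            printf("polled %d, ci %u\n", n, cq.ci_doorbell);  /* polled 3, ci 3 */
            return 0;
    }
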
612 struct ib_ucontext *context, struct mlx5_ib_cq *cq, in create_cq_user() argument
640 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, in create_cq_user()
643 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
644 err = PTR_ERR(cq->buf.umem); in create_cq_user()
649 &cq->db); in create_cq_user()
653 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
664 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); in create_cq_user()
672 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in create_cq_user()
675 ib_umem_release(cq->buf.umem); in create_cq_user()
679 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) in destroy_cq_user() argument
681 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in destroy_cq_user()
682 ib_umem_release(cq->buf.umem); in destroy_cq_user()
685 static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) in init_cq_buf() argument
698 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in create_cq_kernel() argument
705 err = mlx5_db_alloc(dev->mdev, &cq->db); in create_cq_kernel()
709 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
710 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
711 cq->mcq.cqe_sz = cqe_size; in create_cq_kernel()
713 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); in create_cq_kernel()
717 init_cq_buf(cq, &cq->buf); in create_cq_kernel()
719 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; in create_cq_kernel()
725 mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); in create_cq_kernel()
727 (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; in create_cq_kernel()
733 free_cq_buf(dev, &cq->buf); in create_cq_kernel()
736 mlx5_db_free(dev->mdev, &cq->db); in create_cq_kernel()
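
Two details of create_cq_kernel() are worth spelling out: the doorbell allocation on line 705 provides two consecutive 32-bit words, the set-CI doorbell at db.db and the arm doorbell at db.db + 1 (lines 709-710), and the command length on line 719 is the fixed command size plus one page-address (PAS) entry per buffer page. A toy sketch of that arithmetic follows; the struct is a stand-in, the real layout lives in the mlx5 headers.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_create_cq_in {
            uint8_t  fixed_part[64];   /* CQ context, flags, ...     */
            uint64_t pas[];            /* one entry per buffer page  */
    };

    int main(void)
    {
            int npages = 4;
            /* line 719: inlen = sizeof(command) + sizeof(pas entry) * npages */
            size_t inlen = sizeof(struct toy_create_cq_in)
                           + sizeof(uint64_t) * npages;

            /* lines 709-710: two doorbell words from one allocation */
            uint32_t db[2] = { 0, 0 };
            uint32_t *set_ci_db = &db[0];
            uint32_t *arm_db = &db[1];

            printf("inlen = %zu, arm doorbell %ld bytes after set-CI\n",
                   inlen, (long)((char *)arm_db - (char *)set_ci_db));
            return 0;
    }
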
740 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in destroy_cq_kernel() argument
742 free_cq_buf(dev, &cq->buf); in destroy_cq_kernel()
743 mlx5_db_free(dev->mdev, &cq->db); in destroy_cq_kernel()
755 struct mlx5_ib_cq *cq; in mlx5_ib_create_cq() local
773 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx5_ib_create_cq()
774 if (!cq) in mlx5_ib_create_cq()
777 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
778 mutex_init(&cq->resize_mutex); in mlx5_ib_create_cq()
779 spin_lock_init(&cq->lock); in mlx5_ib_create_cq()
780 cq->resize_buf = NULL; in mlx5_ib_create_cq()
781 cq->resize_umem = NULL; in mlx5_ib_create_cq()
784 err = create_cq_user(dev, udata, context, cq, entries, in mlx5_ib_create_cq()
791 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, in mlx5_ib_create_cq()
797 cq->cqe_size = cqe_size; in mlx5_ib_create_cq()
805 cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); in mlx5_ib_create_cq()
807 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); in mlx5_ib_create_cq()
811 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); in mlx5_ib_create_cq()
812 cq->mcq.irqn = irqn; in mlx5_ib_create_cq()
813 cq->mcq.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
814 cq->mcq.event = mlx5_ib_cq_event; in mlx5_ib_create_cq()
817 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { in mlx5_ib_create_cq()
824 return &cq->ibcq; in mlx5_ib_create_cq()
827 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); in mlx5_ib_create_cq()
832 destroy_cq_user(cq, context); in mlx5_ib_create_cq()
834 destroy_cq_kernel(dev, cq); in mlx5_ib_create_cq()
837 kfree(cq); in mlx5_ib_create_cq()
843 int mlx5_ib_destroy_cq(struct ib_cq *cq) in mlx5_ib_destroy_cq() argument
845 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
846 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_destroy_cq()
849 if (cq->uobject) in mlx5_ib_destroy_cq()
850 context = cq->uobject->context; in mlx5_ib_destroy_cq()
868 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) in __mlx5_ib_cq_clean() argument
876 if (!cq) in __mlx5_ib_cq_clean()
885 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) in __mlx5_ib_cq_clean()
886 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
892 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx5_ib_cq_clean()
893 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
894 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
900 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
901 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; in __mlx5_ib_cq_clean()
903 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
910 cq->mcq.cons_index += nfreed; in __mlx5_ib_cq_clean()
915 mlx5_cq_set_ci(&cq->mcq); in __mlx5_ib_cq_clean()
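
__mlx5_ib_cq_clean() is the sweep used when a QP or SRQ is destroyed while completions for it are still queued: lines 885-886 walk forward from the consumer index to find the end of the pending range, lines 892-903 then walk backwards, dropping every CQE that belongs to the given rsn and sliding the survivors nfreed slots toward the producer, and lines 910-915 advance the consumer index past the freed entries and ring the CI doorbell. A self-contained sketch of the same compaction over a plain array (toy types, only the QP-number field kept):

    #include <stdint.h>
    #include <stdio.h>

    #define NENT 8                          /* power of two, like the ring */

    struct toy_cqe { uint32_t qpn; };

    /* Backwards sweep of lines 892-903: drop CQEs owned by 'rsn', shift
     * the rest toward the producer, return how many were freed (the
     * amount added to cons_index on line 910). */
    static uint32_t toy_cq_clean(struct toy_cqe *ring, uint32_t cons_index,
                                 uint32_t prod_index, uint32_t rsn)
    {
            uint32_t nfreed = 0;

            while ((int)--prod_index - (int)cons_index >= 0) {
                    struct toy_cqe *cqe = &ring[prod_index & (NENT - 1)];

                    if (cqe->qpn == rsn)
                            nfreed++;
                    else if (nfreed)
                            ring[(prod_index + nfreed) & (NENT - 1)] = *cqe;
            }
            return nfreed;
    }

    int main(void)
    {
            struct toy_cqe ring[NENT] = {
                    [2] = { 7 }, [3] = { 9 }, [4] = { 7 }, [5] = { 9 },
            };
            uint32_t ci = 2, pi = 6;        /* four pending CQEs, slots 2..5 */
            uint32_t nfreed = toy_cq_clean(ring, ci, pi, 7);

            /* both CQEs for QP 7 are gone; the survivors now start at
             * the new consumer index ci + nfreed */
            printf("freed %u, next qpn %u\n",
                   nfreed, ring[(ci + nfreed) & (NENT - 1)].qpn);  /* 2, 9 */
            return 0;
    }
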
919 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
921 if (!cq) in mlx5_ib_cq_clean()
924 spin_lock_irq(&cq->lock); in mlx5_ib_cq_clean()
925 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
926 spin_unlock_irq(&cq->lock); in mlx5_ib_cq_clean()
929 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx5_ib_modify_cq() argument
932 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
933 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_modify_cq()
958 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_user() argument
966 struct ib_ucontext *context = cq->buf.umem->context; in resize_user()
985 cq->resize_umem = umem; in resize_user()
991 static void un_resize_user(struct mlx5_ib_cq *cq) in un_resize_user() argument
993 ib_umem_release(cq->resize_umem); in un_resize_user()
996 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_kernel() argument
1001 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); in resize_kernel()
1002 if (!cq->resize_buf) in resize_kernel()
1005 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); in resize_kernel()
1009 init_cq_buf(cq, cq->resize_buf); in resize_kernel()
1014 kfree(cq->resize_buf); in resize_kernel()
1018 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in un_resize_kernel() argument
1020 free_cq_buf(dev, cq->resize_buf); in un_resize_kernel()
1021 cq->resize_buf = NULL; in un_resize_kernel()
1024 static int copy_resize_cqes(struct mlx5_ib_cq *cq) in copy_resize_cqes() argument
1026 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
1037 ssize = cq->buf.cqe_size; in copy_resize_cqes()
1038 dsize = cq->resize_buf->cqe_size; in copy_resize_cqes()
1044 i = cq->mcq.cons_index; in copy_resize_cqes()
1045 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1054 dcqe = get_cqe_from_buf(cq->resize_buf, in copy_resize_cqes()
1055 (i + 1) & (cq->resize_buf->nent), in copy_resize_cqes()
1058 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
1063 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1072 cq->mcq.cqn); in copy_resize_cqes()
1076 ++cq->mcq.cons_index; in copy_resize_cqes()
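
copy_resize_cqes() replays completions still pending in the old buffer into the freshly resized one: starting at the consumer index (lines 1044-1045) it copies each software-owned CQE to the next slot of the new ring (lines 1054-1055), rewriting its owner bit for the new ring size via sw_ownership_bit() on line 1058, until the resize marker CQE is reached; the consumer index then steps past the copied range (line 1076). A small sketch of the owner-bit re-stamp, assuming the bit is the wrap parity used by the test on line 91 (toy names, not the driver's helpers):

    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_MASK 0x1          /* stands in for MLX5_CQE_OWNER_MASK */

    /* Owner bit for destination index 'idx' in a ring of 'nent_new'
     * entries: the wrap parity, so the poll-side test on line 91 will
     * see the replayed CQE as software-owned. */
    static uint8_t sw_owner_bit(uint32_t idx, uint32_t nent_new)
    {
            return !!(idx & nent_new);
    }

    static uint8_t restamp(uint8_t op_own, uint32_t idx, uint32_t nent_new)
    {
            return (op_own & ~OWNER_MASK) | sw_owner_bit(idx, nent_new);
    }

    int main(void)
    {
            /* copied to index 5 of a 32-entry ring: owner bit 0;
             * copied to index 37 (one wrap later): owner bit 1 */
            printf("%d %d\n", restamp(0xd0, 5, 32) & OWNER_MASK,
                   restamp(0xd0, 37, 32) & OWNER_MASK);   /* 0 1 */
            return 0;
    }
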
1083 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_resize_cq() local
1107 mutex_lock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1109 err = resize_user(dev, cq, entries, udata, &npas, &page_shift, in mlx5_ib_resize_cq()
1113 err = resize_kernel(dev, cq, entries, cqe_size); in mlx5_ib_resize_cq()
1115 npas = cq->resize_buf->buf.npages; in mlx5_ib_resize_cq()
1116 page_shift = cq->resize_buf->buf.page_shift; in mlx5_ib_resize_cq()
1131 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, in mlx5_ib_resize_cq()
1134 mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); in mlx5_ib_resize_cq()
1144 in->cqn = cpu_to_be32(cq->mcq.cqn); in mlx5_ib_resize_cq()
1146 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); in mlx5_ib_resize_cq()
1151 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1152 ib_umem_release(cq->buf.umem); in mlx5_ib_resize_cq()
1153 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
1154 cq->resize_umem = NULL; in mlx5_ib_resize_cq()
1159 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_resize_cq()
1160 if (cq->resize_buf) { in mlx5_ib_resize_cq()
1161 err = copy_resize_cqes(cq); in mlx5_ib_resize_cq()
1163 tbuf = cq->buf; in mlx5_ib_resize_cq()
1164 cq->buf = *cq->resize_buf; in mlx5_ib_resize_cq()
1165 kfree(cq->resize_buf); in mlx5_ib_resize_cq()
1166 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1170 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1171 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_resize_cq()
1175 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1185 un_resize_user(cq); in mlx5_ib_resize_cq()
1187 un_resize_kernel(dev, cq); in mlx5_ib_resize_cq()
1189 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1195 struct mlx5_ib_cq *cq; in mlx5_ib_get_cqe_size() local
1200 cq = to_mcq(ibcq); in mlx5_ib_get_cqe_size()
1201 return cq->cqe_size; in mlx5_ib_get_cqe_size()