root/drivers/infiniband/hw/mlx5/srq.c


DEFINITIONS

This source file includes the following definitions:
  1. get_wqe
  2. mlx5_ib_srq_event
  3. create_srq_user
  4. create_srq_kernel
  5. destroy_srq_user
  6. destroy_srq_kernel
  7. mlx5_ib_create_srq
  8. mlx5_ib_modify_srq
  9. mlx5_ib_query_srq
  10. mlx5_ib_destroy_srq
  11. mlx5_ib_free_srq_wqe
  12. mlx5_ib_post_srq_recv

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "srq.h"

static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
        return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}
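/*
 * The SRQ WQE ring lives in a fragmented buffer, so get_wqe() goes
 * through the frag buf control (fbc) rather than plain pointer
 * arithmetic.  As an illustrative sketch of the index math (an
 * assumption about the layout, not the actual
 * mlx5_frag_buf_get_wqe() internals): with 4 KB fragments and
 * wqe_shift = 6 (64-byte descriptors), WQE n would live in fragment
 * (n >> 6) at byte offset ((n & 63) << 6).
 */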

static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
                                type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}
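/*
 * Hypothetical consumer-side sketch (not part of this driver): a ULP
 * receives the events forwarded above by supplying an event handler at
 * SRQ creation time, e.g.:
 *
 *      static void my_srq_event(struct ib_event *ev, void *ctx)
 *      {
 *              if (ev->event == IB_EVENT_SRQ_LIMIT_REACHED)
 *                      schedule_work(ctx);     (repost receive buffers)
 *      }
 *
 *      struct ib_srq_init_attr init = {
 *              .event_handler = my_srq_event,
 *              .srq_context   = &my_repost_work,
 *      };
 *      srq = ib_create_srq(pd, &init);
 *
 * "my_srq_event" and "my_repost_work" are illustrative names only.
 */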

static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_srq_attr *in,
                           struct ib_udata *udata, int buf_size)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
        struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);
        size_t ucmdlen;
        int err;
        int npages;
        int page_shift;
        int ncont;
        u32 offset;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;

        ucmdlen = min(udata->inlen, sizeof(ucmd));

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
                return -EFAULT;
        }

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd)))
                return -EINVAL;

        if (in->type != IB_SRQT_BASIC) {
                err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
                if (err)
                        return err;
        }

        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

        srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
        if (IS_ERR(srq->umem)) {
                mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
                err = PTR_ERR(srq->umem);
                return err;
        }

        mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
                           &page_shift, &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
                                     &offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_umem;
        }

        mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

        err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map doorbell failed\n");
                goto err_in;
        }

        in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->page_offset = offset;
        in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = uidx;

        return 0;

err_in:
        kvfree(in->pas);

err_umem:
        ib_umem_release(srq->umem);

        return err;
}
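/*
 * Note on the user path above: the WQE ring and doorbell record are
 * allocated by userspace (e.g. the rdma-core mlx5 provider) and handed
 * in via ucmd.buf_addr / ucmd.db_addr, so the kernel only pins the
 * pages, builds the physical address array (in->pas) for firmware and
 * maps the doorbell.  Unlike the kernel path below, no WQE free-list
 * initialization happens here; userspace owns the ring contents.
 */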

static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_srq_attr *in, int buf_size)
{
        int err;
        int i;
        struct mlx5_wqe_srq_next_seg *next;

        err = mlx5_db_alloc(dev->mdev, &srq->db);
        if (err) {
                mlx5_ib_warn(dev, "alloc dbell rec failed\n");
                return err;
        }

        if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
                                     dev->mdev->priv.numa_node)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
        }

        mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
                      &srq->fbc);

        srq->head    = 0;
        srq->tail    = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        for (i = 0; i < srq->msrq.max; i++) {
                next = get_wqe(srq, i);
                next->next_wqe_index =
                        cpu_to_be16((i + 1) & (srq->msrq.max - 1));
        }

        mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
        in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_frag_array(&srq->buf, in->pas);

        srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                err = -ENOMEM;
                goto err_in;
        }
        srq->wq_sig = 0;

        in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = MLX5_IB_DEFAULT_UIDX;

        return 0;

err_in:
        kvfree(in->pas);

err_buf:
        mlx5_frag_buf_free(dev->mdev, &srq->buf);

err_db:
        mlx5_db_free(dev->mdev, &srq->db);
        return err;
}
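/*
 * The initialization loop above threads every descriptor into a
 * circular free list through next_wqe_index.  A worked example with
 * msrq.max = 4 (indices wrap via the max - 1 mask):
 *
 *      WQE 0 -> 1, WQE 1 -> 2, WQE 2 -> 3, WQE 3 -> 0
 *
 * head points at the next free WQE to hand to a posted receive and
 * tail at the slot where completed WQEs are relinked (see
 * mlx5_ib_free_srq_wqe()).  head == tail means the free list is
 * exhausted; one entry is sacrificed to tell "full" from "empty",
 * which is why max_wr is reported back as msrq.max - 1.
 */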

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                             struct ib_udata *udata)
{
        mlx5_ib_db_unmap_user(
                rdma_udata_to_drv_context(
                        udata,
                        struct mlx5_ib_ucontext,
                        ibucontext),
                &srq->db);
        ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
        kvfree(srq->wrid);
        mlx5_frag_buf_free(dev->mdev, &srq->buf);
        mlx5_db_free(dev->mdev, &srq->db);
}

int mlx5_ib_create_srq(struct ib_srq *ib_srq,
                       struct ib_srq_init_attr *init_attr,
                       struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
        struct mlx5_ib_srq *srq = to_msrq(ib_srq);
        size_t desc_size;
        size_t buf_size;
        int err;
        struct mlx5_srq_attr in = {};
        __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
                            max_srq_wqes);
                return -EINVAL;
        }

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
        if (desc_size == 0 || srq->msrq.max_gs > desc_size)
                return -EINVAL;

        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
        if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
                return -EINVAL;

        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
        if (buf_size < desc_size)
                return -EINVAL;

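        /*
         * Worked example of the sizing above (assuming the usual
         * 16-byte next and data segments): max_sge = 3 gives
         * desc_size = 16 + 3 * 16 = 64 bytes, already a power of two,
         * so wqe_shift becomes 6 and a 256-entry SRQ needs a 16 KB
         * buffer.  The "- 4" applied to wqe_shift below encodes the
         * stride in the 16-byte units the device expects.
         */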
        in.type = init_attr->srq_type;

        if (udata)
                err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);

        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
                             udata ? "user" : "kernel", err);
                return err;
        }

        in.log_size = ilog2(srq->msrq.max);
        in.wqe_shift = srq->msrq.wqe_shift - 4;
        if (srq->wq_sig)
                in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

        if (init_attr->srq_type == IB_SRQT_XRC)
                in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
        else
                in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;

        if (init_attr->srq_type == IB_SRQT_TM) {
                in.tm_log_list_size =
                        ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
                if (in.tm_log_list_size >
                    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
                        mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
                        err = -EINVAL;
                        goto err_usr_kern_srq;
                }
                in.flags |= MLX5_SRQ_FLAG_RNDV;
        }

        if (ib_srq_has_cq(init_attr->srq_type))
                in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
        else
                in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

        in.pd = to_mpd(ib_srq->pd)->pdn;
        in.db_record = srq->db.dma;
        err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
        kvfree(in.pas);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
                goto err_usr_kern_srq;
        }

        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

        srq->msrq.event = mlx5_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

        if (udata)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
                        mlx5_ib_dbg(dev, "copy to user failed\n");
                        err = -EFAULT;
                        goto err_core;
                }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return 0;

err_core:
        mlx5_cmd_destroy_srq(dev, &srq->msrq);

err_usr_kern_srq:
        if (udata)
                destroy_srq_user(ib_srq->pd, srq, udata);
        else
                destroy_srq_kernel(dev, srq);

        return err;
}
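/*
 * Error unwinding in mlx5_ib_create_srq() mirrors construction order:
 * err_core tears down the firmware object created by
 * mlx5_cmd_create_srq(), then err_usr_kern_srq releases whichever
 * buffer path (user or kernel) was taken.  Note that in.pas is freed
 * unconditionally right after the firmware command, since the physical
 * address array is only needed while the command executes.
 */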

int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs yet */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}
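/*
 * Hypothetical usage sketch (illustrative values): a consumer arms the
 * limit event through the ib core, which lands here for mlx5 devices:
 *
 *      struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *      ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 *
 * The limit is one-shot: after the hardware fires
 * IB_EVENT_SRQ_LIMIT_REACHED it is disarmed, so the handler typically
 * reposts receives and calls ib_modify_srq() again.
 */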

int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;
        struct mlx5_srq_attr *out;

        out = kzalloc(sizeof(*out), GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
        if (ret)
                goto out_box;

        srq_attr->srq_limit = out->lwm;
        srq_attr->max_wr    = srq->msrq.max - 1;
        srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
        kfree(out);
        return ret;
}

void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);

        mlx5_cmd_destroy_srq(dev, &msrq->msrq);

        if (srq->uobject) {
                mlx5_ib_db_unmap_user(
                        rdma_udata_to_drv_context(
                                udata,
                                struct mlx5_ib_ucontext,
                                ibucontext),
                        &msrq->db);
                ib_umem_release(msrq->umem);
        } else {
                destroy_srq_kernel(dev, msrq);
        }
}

void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
        struct mlx5_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}
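/*
 * mlx5_ib_free_srq_wqe() runs when a receive completion is reaped: the
 * just-consumed WQE becomes the new tail of the free list built in
 * create_srq_kernel(), making it available to a later
 * mlx5_ib_post_srq_recv().  The plain spin_lock (not _irqsave) is safe
 * only because, as the comment above notes, callers already run with
 * interrupts disabled.
 */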

int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr)
{
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        struct mlx5_wqe_srq_next_seg *next;
        struct mlx5_wqe_data_seg *scat;
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                err = -EIO;
                *bad_wr = wr;
                goto out;
        }

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                next      = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat      = (struct mlx5_wqe_data_seg *)(next + 1);

                for (i = 0; i < wr->num_sge; i++) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_avail_gather) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
                        scat[i].addr       = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /* Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }
out:
        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}
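/*
 * Hypothetical posting sketch (illustrative names): a kernel ULP fills
 * an ib_recv_wr chain and hands it to the ib core, which lands here
 * for mlx5 devices:
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = buf_len,
 *              .lkey   = pd->local_dma_lkey,
 *      };
 *      struct ib_recv_wr wr = {
 *              .wr_id   = (uintptr_t)my_buf,
 *              .sg_list = &sge,
 *              .num_sge = 1,
 *      };
 *      const struct ib_recv_wr *bad_wr;
 *
 *      ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 *
 * On -ENOMEM (free list exhausted, head == tail) bad_wr points at the
 * first work request that could not be posted.
 */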
