drivers/infiniband/hw/qib/qib_qp.c

DEFINITIONS

This source file includes the following definitions:
  1. mk_qpn
  2. find_next_offset
  3. get_map_page
  4. qib_alloc_qpn
  5. qib_free_all_qps
  6. qib_notify_qp_reset
  7. qib_notify_error_qp
  8. mtu_to_enum
  9. qib_get_pmtu_from_attr
  10. qib_mtu_to_path_mtu
  11. qib_mtu_from_qp
  12. qib_qp_priv_alloc
  13. qib_qp_priv_free
  14. qib_stop_send_queue
  15. qib_quiesce_qp
  16. qib_flush_qp_waiters
  17. qib_check_send_wqe
  18. qib_qp_iter_print

/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

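/*
 * mk_qpn - compute the QP number for a given bitmap page and bit offset.
 * Each map page covers RVT_BITS_PER_PAGE QPNs, so the QPN is the page's
 * index in the table times the page size plus the offset.
 */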
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

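/*
 * find_next_offset - find the next candidate bit offset in a map page.
 * When the chip encodes the receive context in the QPN (qpt_mask is
 * non-zero), step to the next offset whose masked bits select one of the
 * n kernel receive queues; otherwise just find the next zero bit.
 */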
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
                                        struct rvt_qpn_map *map, unsigned off,
                                        unsigned n, u16 qpt_mask)
{
        if (qpt_mask) {
                off++;
                if (((off & qpt_mask) >> 1) >= n)
                        off = (off | qpt_mask) + 2;
        } else {
                off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
        }
        return off;
}

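/*
 * Per-opcode posting rules consulted by the rdma_vt core when a work
 * request is posted: the size of the work request structure to copy and
 * the QP types allowed to use the opcode.
 */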
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

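/*
 * get_map_page - allocate and install a zeroed QPN bitmap page, freeing
 * the new page if another thread raced ahead and installed one first.
 */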
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                  enum ib_qp_type type, u8 port)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        int ret;
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);
        u16 qpt_mask = dd->qpn_mask;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

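        /*
         * Scan the QPN bitmap for a free entry, starting just past the
         * last QPN allocated; QPNs 0 and 1 stay reserved for the special
         * SMI/GSI QPs handled above.
         */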
        qpn = qpt->last + 2;
        if (qpn >= RVT_QPN_MAX)
                qpn = 2;
        if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
                qpn = (qpn | qpt_mask) + 2;
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset = find_next_offset(qpt, map, offset,
                                dd->n_krcv_queues, qpt_mask);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of special QPs (QP0/QP1) still in use.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }
        return qp_inuse;
}

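/*
 * qib_notify_qp_reset - rdma_vt driver hook called when a QP is reset;
 * clear the count of in-flight send DMA operations.
 */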
void qib_notify_qp_reset(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        atomic_set(&priv->s_dma_busy, 0);
}

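/*
 * qib_notify_error_qp - rdma_vt driver hook called when a QP moves to the
 * error state: take the QP off the I/O wait list and, if no send work is
 * in progress, drop any RDMA MR reference and pending send DMA request.
 */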
void qib_notify_error_qp(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        spin_lock(&dev->rdi.pending_lock);
        if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
                qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                list_del_init(&priv->iowait);
        }
        spin_unlock(&dev->rdi.pending_lock);

        if (!(qp->s_flags & RVT_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                if (priv->s_tx) {
                        qib_put_txreq(priv->s_tx);
                        priv->s_tx = NULL;
                }
        }
}

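/*
 * mtu_to_enum - convert an MTU in bytes to the corresponding IB MTU
 * enumeration, defaulting to IB_MTU_2048 for unrecognized values.
 */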
static int mtu_to_enum(u32 mtu)
{
        int enum_mtu;

        switch (mtu) {
        case 4096:
                enum_mtu = IB_MTU_4096;
                break;
        case 2048:
                enum_mtu = IB_MTU_2048;
                break;
        case 1024:
                enum_mtu = IB_MTU_1024;
                break;
        case 512:
                enum_mtu = IB_MTU_512;
                break;
        case 256:
                enum_mtu = IB_MTU_256;
                break;
        default:
                enum_mtu = IB_MTU_2048;
        }
        return enum_mtu;
}

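/*
 * qib_get_pmtu_from_attr - validate the requested path MTU and clamp it to
 * the MTU of the physical port. Returns the path MTU enumeration to use,
 * or -EINVAL if the attribute does not hold a valid IB MTU.
 */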
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                           struct ib_qp_attr *attr)
{
        int mtu, pmtu, pidx = qp->port_num - 1;
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);

        mtu = ib_mtu_enum_to_int(attr->path_mtu);
        if (mtu == -1)
                return -EINVAL;

        if (mtu > dd->pport[pidx].ibmtu)
                pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
        else
                pmtu = attr->path_mtu;
        return pmtu;
}

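/*
 * qib_mtu_to_path_mtu - rdma_vt driver hook converting an MTU in bytes to
 * the IB path MTU enumeration stored in the QP.
 */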
int qib_mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        return ib_mtu_enum_to_int(pmtu);
}

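/*
 * qib_qp_priv_alloc - allocate the qib-private portion of a QP: the
 * scratch send header, the DMA-drain waitqueue, the send work item and
 * the I/O wait list linkage. Returns the allocation or an ERR_PTR.
 */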
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct qib_qp_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);
        priv->owner = qp;

        priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
        if (!priv->s_hdr) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
        init_waitqueue_head(&priv->wait_dma);
        INIT_WORK(&priv->s_work, _qib_do_send);
        INIT_LIST_HEAD(&priv->iowait);

        return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        kfree(priv->s_hdr);
        kfree(priv);
}

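/*
 * qib_stop_send_queue - cancel the QP's send work item and wait for any
 * already-running instance of it to finish.
 */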
void qib_stop_send_queue(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        cancel_work_sync(&priv->s_work);
}

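/*
 * qib_quiesce_qp - wait for all in-flight send DMA on the QP to complete,
 * then release any leftover send DMA request.
 */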
void qib_quiesce_qp(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
        if (priv->s_tx) {
                qib_put_txreq(priv->s_tx);
                priv->s_tx = NULL;
        }
}

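/*
 * qib_flush_qp_waiters - remove the QP from the I/O wait list under the
 * pending_lock so it is no longer waiting for send resources.
 */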
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        spin_lock(&dev->rdi.pending_lock);
        if (!list_empty(&priv->iowait))
                list_del_init(&priv->iowait);
        spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled
 *
 * Return: 0 on success, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
                       struct rvt_swqe *wqe, bool *call_send)
{
        struct rvt_ah *ah;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                if (wqe->length > 0x80000000U)
                        return -EINVAL;
                if (wqe->length > qp->pmtu)
                        *call_send = false;
                break;
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                ah = rvt_get_swqe_ah(wqe);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
                /* progress hint */
                *call_send = true;
                break;
        default:
                break;
        }
        return 0;
}


#ifdef CONFIG_DEBUG_FS

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};

/**
 * qib_qp_iter_print - print information to seq_file
 * @s: the seq_file
 * @iter: the iterator
 */
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct qib_qp_priv *priv = qp->priv;

        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
                   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
                   iter->n,
                   qp->ibqp.qp_num,
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe->wr.opcode,
                   qp->s_hdrwords,
                   qp->s_flags,
                   atomic_read(&priv->s_dma_busy),
                   !list_empty(&priv->iowait),
                   qp->timeout,
                   wqe->ssn,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->remote_qpn,
                   rdma_ah_get_dlid(&qp->remote_ah_attr));
}


#endif
