drivers/infiniband/sw/rdmavt/qp.c


DEFINITIONS

This source file includes the following definitions:
  1. rvt_wss_llc_size
  2. cacheless_memcpy
  3. rvt_wss_exit
  4. rvt_wss_init
  5. wss_advance_clean_counter
  6. wss_insert
  7. wss_exceeds_threshold
  8. get_map_page
  9. init_qpn_table
  10. free_qpn_table
  11. rvt_driver_qp_init
  12. rvt_free_qp_cb
  13. rvt_free_all_qps
  14. rvt_qp_exit
  15. mk_qpn
  16. alloc_qpn
  17. rvt_clear_mr_refs
  18. rvt_swqe_has_lkey
  19. rvt_qp_sends_has_lkey
  20. rvt_qp_acks_has_lkey
  21. rvt_qp_mr_clean
  22. rvt_remove_qp
  23. rvt_alloc_rq
  24. rvt_init_qp
  25. _rvt_reset_qp
  26. rvt_reset_qp
  27. rvt_free_qpn
  28. get_allowed_ops
  29. free_ud_wq_attr
  30. alloc_ud_wq_attr
  31. rvt_create_qp
  32. rvt_error_qp
  33. rvt_insert_qp
  34. rvt_modify_qp
  35. rvt_destroy_qp
  36. rvt_query_qp
  37. rvt_post_recv
  38. rvt_qp_valid_operation
  39. rvt_qp_is_avail
  40. rvt_post_one_wr
  41. rvt_post_send
  42. rvt_post_srq_recv
  43. rvt_cast_sge
  44. init_sge
  45. get_count
  46. get_rvt_head
  47. rvt_get_rwqe
  48. rvt_comm_est
  49. rvt_rc_error
  50. rvt_rnr_tbl_to_usec
  51. rvt_aeth_to_usec
  52. rvt_add_retry_timer_ext
  53. rvt_add_rnr_timer
  54. rvt_stop_rc_timers
  55. rvt_stop_rnr_timer
  56. rvt_del_timers_sync
  57. rvt_rc_timeout
  58. rvt_rc_rnr_retry
  59. rvt_qp_iter_init
  60. rvt_qp_iter_next
  61. rvt_qp_iter
  62. rvt_send_complete
  63. rvt_copy_sge
  64. loopback_qp_drop
  65. rvt_ruc_loopback

/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01:    .01 */
        20,     /* 02:    .02 */
        30,     /* 03:    .03 */
        40,     /* 04:    .04 */
        60,     /* 05:    .06 */
        80,     /* 06:    .08 */
        120,    /* 07:    .12 */
        160,    /* 08:    .16 */
        240,    /* 09:    .24 */
        320,    /* 0A:    .32 */
        480,    /* 0B:    .48 */
        640,    /* 0C:    .64 */
        960,    /* 0D:    .96 */
        1280,   /* 0E:   1.28 */
        1920,   /* 0F:   1.92 */
        2560,   /* 10:   2.56 */
        3840,   /* 11:   3.84 */
        5120,   /* 12:   5.12 */
        7680,   /* 13:   7.68 */
        10240,  /* 14:  10.24 */
        15360,  /* 15:  15.36 */
        20480,  /* 16:  20.48 */
        30720,  /* 17:  30.72 */
        40960,  /* 18:  40.96 */
        61440,  /* 19:  61.44 */
        81920,  /* 1A:  81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};

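/*
 * The values above are in microseconds; the comments give milliseconds.
 * rvt_rnr_tbl_to_usec() and rvt_aeth_to_usec(), defined later in this
 * file, index this table with the 5-bit RNR timeout code; for example,
 * code 0x0D maps to 960 usec (0.96 ms).
 */
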
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

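/*
 * A typical check against this table, as done by the post-send and
 * post-recv paths later in this file, looks like:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */
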
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
        /* assume that the boot CPU value is universal for all CPUs */
        return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
        /*
         * Use the only available X64 cacheless copy.  Add a __user cast
         * to quiet sparse.  The src argument is already in the kernel so
         * there are no security issues.  The extra fault recovery machinery
         * is not invoked.
         */
        __copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
        struct rvt_wss *wss = rdi->wss;

        if (!wss)
                return;

        /* coded to handle partially initialized and repeat callers */
        kfree(wss->entries);
        wss->entries = NULL;
        kfree(rdi->wss);
        rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
        unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
        unsigned int wss_threshold = rdi->dparms.wss_threshold;
        unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
        long llc_size;
        long llc_bits;
        long table_size;
        long table_bits;
        struct rvt_wss *wss;
        int node = rdi->dparms.node;

        if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
                rdi->wss = NULL;
                return 0;
        }

        rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
        if (!rdi->wss)
                return -ENOMEM;
        wss = rdi->wss;

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;

        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;

        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size.  LLC size is in KiB.
         */
        llc_size = rvt_wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss->pages_mask = table_bits - 1;
        wss->num_entries = table_bits / BITS_PER_LONG;

        wss->threshold = (llc_bits * wss_threshold) / 100;
        if (wss->threshold == 0)
                wss->threshold = 1;

        wss->clean_period = wss_clean_period;
        atomic_set(&wss->clean_counter, wss_clean_period);

        wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
                                    GFP_KERNEL, node);
        if (!wss->entries) {
                rvt_wss_exit(rdi);
                return -ENOMEM;
        }

        return 0;
}

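/*
 * Worked example for the sizing above, assuming a 32 MiB LLC and 4 KiB
 * pages: llc_size = 32 MiB rounds up to the same power of two, so
 * llc_bits = table_bits = 8192 one-bit-per-page entries, num_entries =
 * 8192 / 64 = 128 longs, and the default 80% threshold becomes
 * 8192 * 80 / 100 = 6553 pages (~25.6 MiB of tracked working set).
 */
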
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
        int entry;
        int weight;
        unsigned long bits;

        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss->clean_counter)) {
                /*
                 * Set, not add, the clean period.  This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance.  Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss->clean_counter, wss->clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry.  The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss->clean_entry) - 1)
                        & (wss->num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss->entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss->total_count);
        }
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

        if (!test_and_set_bit(nr, &wss->entries[entry]))
                atomic_inc(&wss->total_count);

        wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
        return atomic_read(&wss->total_count) >= wss->threshold;
}

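/*
 * Together these two helpers drive the adaptive SGE copy: the copy path
 * (rvt_copy_sge(), later in this file) records each destination page with
 * wss_insert() and, roughly, switches from a normal memcpy() to
 * cacheless_memcpy() once wss_exceeds_threshold() reports that the
 * working set no longer fits in the last level cache.
 */
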
static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our qpn table. No need for two. Let's go ahead and mark the
         * bitmaps for those. The reserved range must be *after* the range
         * which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset ||
            !rdi->driver_f.notify_restart_rc)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_array_node(rdi->qp_dev->qp_table_size,
                             sizeof(*rdi->qp_dev->qp_table),
                             GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
        unsigned int *qp_inuse = (unsigned int *)v;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        /* Reset the qp and remove it from the qp hash list */
        rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

        /* Increment the qp_inuse count */
        (*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned int qp_inuse = 0;

        qp_inuse += rvt_mcast_tree_empty(rdi);

        rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

        return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >= BITS_PER_PAGE.
                         * That is OK.  It gets re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(rdi->dparms.qos_shift > 1 &&
                        offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

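/*
 * Example of the increment logic above, assuming dparms.qpn_inc = 1 and
 * dparms.qos_shift = 2: init_qpn_table() sets qpt->incr = 1 << 2 = 4, so
 * the scan advances QPNs by 4 while "offset = qpt->incr | (offset & 1)"
 * preserves bit 0, keeping the low-order QoS bits of every allocated QPN
 * clear for the driver to fill in (enforced by the WARN_ON() above).
 */
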
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

                        rvt_put_qp_swqe(qp, wqe);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                if (rvt_mr_has_lkey(sge->mr, lkey))
                        return true;
        }
        return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the rvt_qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        u32 s_last = qp->s_last;

        while (s_last != qp->s_head) {
                struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

                if (rvt_swqe_has_lkey(wqe, lkey))
                        return true;

                if (++s_last >= qp->s_size)
                        s_last = 0;
        }
        if (qp->s_rdma_mr)
                if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
                        return true;
        return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        int i;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[i];

                if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
                        return true;
        }
        return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
        bool lastwqe = false;

        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                /* avoid special QPs */
                return;
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto check_lwqe;

        if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
            rvt_qp_sends_has_lkey(qp, lkey) ||
            rvt_qp_acks_has_lkey(qp, lkey))
                lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                     rcu_dereference_protected(qp->next,
                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                rvt_put_qp(qp);
        }
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: True if user data is available, false otherwise
 *
 * Return: 0 on success; -ENOMEM if memory allocation fails.
 *
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
                 struct ib_udata *udata)
{
        if (udata) {
                rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
                if (!rq->wq)
                        goto bail;
                /* need kwq with no buffers */
                rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->wq->wq;
        } else {
                /* need kwq with buffers */
                rq->kwq =
                        vzalloc_node(sizeof(struct rvt_krwq) + size, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->kwq->wq;
        }

        spin_lock_init(&rq->kwq->p_lock);
        spin_lock_init(&rq->kwq->c_lock);
        return 0;
bail:
        rvt_free_rq(rq);
        return -ENOMEM;
}

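/*
 * To summarize the two layouts above: for a user QP the rwqe ring lives
 * in the vmalloc_user() region (rq->wq) so it can be mmapped, and rq->kwq
 * is only a kernel-side header whose curr_wq points into that shared
 * ring; for a kernel QP a single vzalloc_node() provides both the
 * rvt_krwq header and the ring it points at.
 */
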
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path
 * takes the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                        enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.kwq)
                qp->r_rq.kwq->count = qp->r_rq.size;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                          enum ib_qp_type type)
        __must_hold(&qp->s_lock)
        __must_hold(&qp->s_hlock)
        __must_hold(&qp->r_lock)
{
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                rvt_stop_rc_timers(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);
                rvt_del_timers_sync(qp);
                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
                /*
                 * Let the driver do any tear down or re-init it needs to for
                 * a qp that has been reset
                 */
                rdi->driver_f.notify_qp_reset(qp);
        }
        rvt_init_qp(rdi, qp, type);
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type)
{
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        _rvt_reset_qp(rdi, qp, type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
        return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
                IB_OPCODE_UC : IB_OPCODE_UD;
}

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                kfree(wqe->ud_wr.attr);
                wqe->ud_wr.attr = NULL;
        }
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
                                               GFP_KERNEL, node);
                if (!wqe->ud_wr.attr) {
                        free_ud_wq_attr(qp);
                        return -ENOMEM;
                }
        }

        return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance, there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        size_t sqsize;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
            init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge >
                    rdi->dparms.props.max_recv_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }
        sqsize =
                init_attr->cap.max_send_wr + 1 +
                rdi->dparms.reserved_operations;
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                /* fall through */
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
                swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);

                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
                                  rdi->dparms.node);
                if (!qp)
                        goto bail_swq;
                qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

                RCU_INIT_POINTER(qp->next, NULL);
                if (init_attr->qp_type == IB_QPT_RC) {
                        qp->s_ack_queue =
                                kcalloc_node(rvt_max_atomic(rdi),
                                             sizeof(*qp->s_ack_queue),
                                             GFP_KERNEL,
                                             rdi->dparms.node);
                        if (!qp->s_ack_queue)
                                goto bail_qp;
                }
                /* initialize timers needed for rc qp */
                timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
                hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                qp->s_rnr_timer.function = rvt_rc_rnr_retry;

                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
                if (IS_ERR(priv)) {
                        ret = priv;
                        goto bail_qp;
                }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
                                           rdi->dparms.node, udata);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_driver_priv;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_hlock);
                spin_lock_init(&qp->s_lock);
                atomic_set(&qp->refcount, 0);
                atomic_set(&qp->local_ops_pending, 0);
                init_waitqueue_head(&qp->wait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = sqsize;
                qp->s_avail = init_attr->cap.max_send_wr;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = (ERR_PTR(err));
                        goto bail_driver_priv;
                }

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                rvt_init_qp(rdi, qp, init_attr->qp_type);
                if (rdi->driver_f.qp_priv_init) {
                        err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_rq_wq;
                        }
                }
                break;

        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s, udata,
                                                      qp->r_rq.wq);
                        if (IS_ERR(qp->ip)) {
                                ret = ERR_CAST(qp->ip);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
                qp->pid = current->pid;
        }

        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;
        /*
         * Maintain a busy_jiffies variable that will be added to the timeout
         * period in mod_retry_timer and add_retry_timer. This busy jiffies
         * is scaled by the number of rc qps created for the device to reduce
         * the number of timeouts occurring when there is a large number of
         * qps. busy_jiffies is incremented every rc qp scaling interval.
         * The scaling interval is selected based on extensive performance
         * evaluation of targeted workloads.
         */
        if (init_attr->qp_type == IB_QPT_RC) {
                rdi->n_rc_qps++;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        return ret;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        rvt_free_rq(&qp->r_rq);
        free_ud_wq_attr(qp);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp->s_ack_queue);
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}

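/*
 * Example of the send queue sizing in rvt_create_qp(): a request for
 * cap.max_send_wr = 100 on a device with dparms.reserved_operations = 1
 * allocates sqsize = 100 + 1 + 1 = 102 swqe slots, while qp->s_avail is
 * initialized to the caller-visible 100.
 */
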
1319 /**
1320  * rvt_error_qp - put a QP into the error state
1321  * @qp: the QP to put into the error state
1322  * @err: the receive completion error to signal if a RWQE is active
1323  *
1324  * Flushes both send and receive work queues.
1325  *
1326  * Return: true if last WQE event should be generated.
1327  * The QP r_lock and s_lock should be held and interrupts disabled.
1328  * If we are already in error state, just return.
1329  */
1330 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1331 {
1332         struct ib_wc wc;
1333         int ret = 0;
1334         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1335 
1336         lockdep_assert_held(&qp->r_lock);
1337         lockdep_assert_held(&qp->s_lock);
1338         if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1339                 goto bail;
1340 
1341         qp->state = IB_QPS_ERR;
1342 
1343         if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1344                 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1345                 del_timer(&qp->s_timer);
1346         }
1347 
1348         if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1349                 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1350 
1351         rdi->driver_f.notify_error_qp(qp);
1352 
1353         /* Schedule the sending tasklet to drain the send work queue. */
1354         if (READ_ONCE(qp->s_last) != qp->s_head)
1355                 rdi->driver_f.schedule_send(qp);
1356 
1357         rvt_clear_mr_refs(qp, 0);
1358 
1359         memset(&wc, 0, sizeof(wc));
1360         wc.qp = &qp->ibqp;
1361         wc.opcode = IB_WC_RECV;
1362 
1363         if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1364                 wc.wr_id = qp->r_wr_id;
1365                 wc.status = err;
1366                 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1367         }
1368         wc.status = IB_WC_WR_FLUSH_ERR;
1369 
1370         if (qp->r_rq.kwq) {
1371                 u32 head;
1372                 u32 tail;
1373                 struct rvt_rwq *wq = NULL;
1374                 struct rvt_krwq *kwq = NULL;
1375 
1376                 spin_lock(&qp->r_rq.kwq->c_lock);
1377                 /* qp->ip used to validate if there is a  user buffer mmaped */
1378                 if (qp->ip) {
1379                         wq = qp->r_rq.wq;
1380                         head = RDMA_READ_UAPI_ATOMIC(wq->head);
1381                         tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
1382                 } else {
1383                         kwq = qp->r_rq.kwq;
1384                         head = kwq->head;
1385                         tail = kwq->tail;
1386                 }
1387                 /* sanity check pointers before trusting them */
1388                 if (head >= qp->r_rq.size)
1389                         head = 0;
1390                 if (tail >= qp->r_rq.size)
1391                         tail = 0;
1392                 while (tail != head) {
1393                         wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1394                         if (++tail >= qp->r_rq.size)
1395                                 tail = 0;
1396                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1397                 }
1398                 if (qp->ip)
1399                         RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
1400                 else
1401                         kwq->tail = tail;
1402                 spin_unlock(&qp->r_rq.kwq->c_lock);
1403         } else if (qp->ibqp.event_handler) {
1404                 ret = 1;
1405         }
1406 
1407 bail:
1408         return ret;
1409 }
1410 EXPORT_SYMBOL(rvt_error_qp);
1411 
1412 /*
1413  * Put the QP into the hash table.
1414  * The hash table holds a reference to the QP.
1415  */
1416 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1417 {
1418         struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1419         unsigned long flags;
1420 
1421         rvt_get_qp(qp);
1422         spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1423 
1424         if (qp->ibqp.qp_num <= 1) {
1425                 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1426         } else {
1427                 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1428 
1429                 qp->next = rdi->qp_dev->qp_table[n];
1430                 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1431                 trace_rvt_qpinsert(qp, n);
1432         }
1433 
1434         spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1435 }
1436 
1437 /**
1438  * rvt_modify_qp - modify the attributes of a queue pair
1439  * @ibqp: the queue pair whose attributes we're modifying
1440  * @attr: the new attributes
1441  * @attr_mask: the mask of attributes to modify
1442  * @udata: user data for libibverbs.so
1443  *
1444  * Return: 0 on success, otherwise returns an errno.
1445  */
1446 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1447                   int attr_mask, struct ib_udata *udata)
1448 {
1449         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1450         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1451         enum ib_qp_state cur_state, new_state;
1452         struct ib_event ev;
1453         int lastwqe = 0;
1454         int mig = 0;
1455         int pmtu = 0; /* for gcc warning only */
1456         int opa_ah;
1457 
1458         spin_lock_irq(&qp->r_lock);
1459         spin_lock(&qp->s_hlock);
1460         spin_lock(&qp->s_lock);
1461 
1462         cur_state = attr_mask & IB_QP_CUR_STATE ?
1463                 attr->cur_qp_state : qp->state;
1464         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1465         opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1466 
1467         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1468                                 attr_mask))
1469                 goto inval;
1470 
1471         if (rdi->driver_f.check_modify_qp &&
1472             rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1473                 goto inval;
1474 
1475         if (attr_mask & IB_QP_AV) {
1476                 if (opa_ah) {
1477                         if (rdma_ah_get_dlid(&attr->ah_attr) >=
1478                                 opa_get_mcast_base(OPA_MCAST_NR))
1479                                 goto inval;
1480                 } else {
1481                         if (rdma_ah_get_dlid(&attr->ah_attr) >=
1482                                 be16_to_cpu(IB_MULTICAST_LID_BASE))
1483                                 goto inval;
1484                 }
1485 
1486                 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1487                         goto inval;
1488         }
1489 
1490         if (attr_mask & IB_QP_ALT_PATH) {
1491                 if (opa_ah) {
1492                         if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1493                                 opa_get_mcast_base(OPA_MCAST_NR))
1494                                 goto inval;
1495                 } else {
1496                         if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1497                                 be16_to_cpu(IB_MULTICAST_LID_BASE))
1498                                 goto inval;
1499                 }
1500 
1501                 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1502                         goto inval;
1503                 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1504                         goto inval;
1505         }
1506 
1507         if (attr_mask & IB_QP_PKEY_INDEX)
1508                 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1509                         goto inval;
1510 
1511         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1512                 if (attr->min_rnr_timer > 31)
1513                         goto inval;
1514 
1515         if (attr_mask & IB_QP_PORT)
1516                 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1517                     qp->ibqp.qp_type == IB_QPT_GSI ||
1518                     attr->port_num == 0 ||
1519                     attr->port_num > ibqp->device->phys_port_cnt)
1520                         goto inval;
1521 
1522         if (attr_mask & IB_QP_DEST_QPN)
1523                 if (attr->dest_qp_num > RVT_QPN_MASK)
1524                         goto inval;
1525 
1526         if (attr_mask & IB_QP_RETRY_CNT)
1527                 if (attr->retry_cnt > 7)
1528                         goto inval;
1529 
1530         if (attr_mask & IB_QP_RNR_RETRY)
1531                 if (attr->rnr_retry > 7)
1532                         goto inval;
1533 
1534         /*
1535          * Don't allow invalid path_mtu values.  It is OK to set it
1536          * greater than the active mtu (or even the max_cap, if we have
1537          * tuned that to a small mtu).  We'll set qp->path_mtu
1538          * to the lesser of the requested attribute mtu and the active
1539          * mtu, for packetizing messages.
1540          * Note that the QP port has to be set in INIT and the MTU in RTR.
1541          */
1542         if (attr_mask & IB_QP_PATH_MTU) {
1543                 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1544                 if (pmtu < 0)
1545                         goto inval;
1546         }
1547 
1548         if (attr_mask & IB_QP_PATH_MIG_STATE) {
1549                 if (attr->path_mig_state == IB_MIG_REARM) {
1550                         if (qp->s_mig_state == IB_MIG_ARMED)
1551                                 goto inval;
1552                         if (new_state != IB_QPS_RTS)
1553                                 goto inval;
1554                 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1555                         if (qp->s_mig_state == IB_MIG_REARM)
1556                                 goto inval;
1557                         if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1558                                 goto inval;
1559                         if (qp->s_mig_state == IB_MIG_ARMED)
1560                                 mig = 1;
1561                 } else {
1562                         goto inval;
1563                 }
1564         }
1565 
1566         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1567                 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1568                         goto inval;
1569 
1570         switch (new_state) {
1571         case IB_QPS_RESET:
1572                 if (qp->state != IB_QPS_RESET)
1573                         _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1574                 break;
1575 
1576         case IB_QPS_RTR:
1577                 /* Allow event to re-trigger if QP set to RTR more than once */
1578                 qp->r_flags &= ~RVT_R_COMM_EST;
1579                 qp->state = new_state;
1580                 break;
1581 
1582         case IB_QPS_SQD:
1583                 qp->s_draining = qp->s_last != qp->s_cur;
1584                 qp->state = new_state;
1585                 break;
1586 
1587         case IB_QPS_SQE:
1588                 if (qp->ibqp.qp_type == IB_QPT_RC)
1589                         goto inval;
1590                 qp->state = new_state;
1591                 break;
1592 
1593         case IB_QPS_ERR:
1594                 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1595                 break;
1596 
1597         default:
1598                 qp->state = new_state;
1599                 break;
1600         }
1601 
1602         if (attr_mask & IB_QP_PKEY_INDEX)
1603                 qp->s_pkey_index = attr->pkey_index;
1604 
1605         if (attr_mask & IB_QP_PORT)
1606                 qp->port_num = attr->port_num;
1607 
1608         if (attr_mask & IB_QP_DEST_QPN)
1609                 qp->remote_qpn = attr->dest_qp_num;
1610 
1611         if (attr_mask & IB_QP_SQ_PSN) {
1612                 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1613                 qp->s_psn = qp->s_next_psn;
1614                 qp->s_sending_psn = qp->s_next_psn;
1615                 qp->s_last_psn = qp->s_next_psn - 1;
1616                 qp->s_sending_hpsn = qp->s_last_psn;
1617         }
1618 
1619         if (attr_mask & IB_QP_RQ_PSN)
1620                 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1621 
1622         if (attr_mask & IB_QP_ACCESS_FLAGS)
1623                 qp->qp_access_flags = attr->qp_access_flags;
1624 
1625         if (attr_mask & IB_QP_AV) {
1626                 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1627                 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1628                 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1629         }
1630 
1631         if (attr_mask & IB_QP_ALT_PATH) {
1632                 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1633                 qp->s_alt_pkey_index = attr->alt_pkey_index;
1634         }
1635 
1636         if (attr_mask & IB_QP_PATH_MIG_STATE) {
1637                 qp->s_mig_state = attr->path_mig_state;
1638                 if (mig) {
1639                         qp->remote_ah_attr = qp->alt_ah_attr;
1640                         qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1641                         qp->s_pkey_index = qp->s_alt_pkey_index;
1642                 }
1643         }
1644 
1645         if (attr_mask & IB_QP_PATH_MTU) {
1646                 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1647                 qp->log_pmtu = ilog2(qp->pmtu);
1648         }
1649 
1650         if (attr_mask & IB_QP_RETRY_CNT) {
1651                 qp->s_retry_cnt = attr->retry_cnt;
1652                 qp->s_retry = attr->retry_cnt;
1653         }
1654 
1655         if (attr_mask & IB_QP_RNR_RETRY) {
1656                 qp->s_rnr_retry_cnt = attr->rnr_retry;
1657                 qp->s_rnr_retry = attr->rnr_retry;
1658         }
1659 
1660         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1661                 qp->r_min_rnr_timer = attr->min_rnr_timer;
1662 
1663         if (attr_mask & IB_QP_TIMEOUT) {
1664                 qp->timeout = attr->timeout;
1665                 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1666         }
1667 
1668         if (attr_mask & IB_QP_QKEY)
1669                 qp->qkey = attr->qkey;
1670 
1671         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1672                 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1673 
1674         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1675                 qp->s_max_rd_atomic = attr->max_rd_atomic;
1676 
1677         if (rdi->driver_f.modify_qp)
1678                 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1679 
1680         spin_unlock(&qp->s_lock);
1681         spin_unlock(&qp->s_hlock);
1682         spin_unlock_irq(&qp->r_lock);
1683 
1684         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1685                 rvt_insert_qp(rdi, qp);
1686 
1687         if (lastwqe) {
1688                 ev.device = qp->ibqp.device;
1689                 ev.element.qp = &qp->ibqp;
1690                 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1691                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1692         }
1693         if (mig) {
1694                 ev.device = qp->ibqp.device;
1695                 ev.element.qp = &qp->ibqp;
1696                 ev.event = IB_EVENT_PATH_MIG;
1697                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1698         }
1699         return 0;
1700 
1701 inval:
1702         spin_unlock(&qp->s_lock);
1703         spin_unlock(&qp->s_hlock);
1704         spin_unlock_irq(&qp->r_lock);
1705         return -EINVAL;
1706 }
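
/*
 * Example usage (an illustrative sketch; the function name and attribute
 * values below are placeholders, not part of rdmavt): a verbs consumer
 * driving the transition handled above.  Moving a fresh QP to INIT sets
 * the port and pkey index; the path MTU is supplied later, in the RTR
 * transition, as the comment in rvt_modify_qp() notes.  For an
 * rdmavt-based device, ib_modify_qp() ends up in rvt_modify_qp().
 */
static int example_modify_to_init(struct ib_qp *ibqp, u8 port)
{
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_INIT,
                .pkey_index = 0,
                .port_num = port,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };

        return ib_modify_qp(ibqp, &attr,
                            IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}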
1707 
1708 /**
1709  * rvt_destroy_qp - destroy a queue pair
1710  * @ibqp: the queue pair to destroy
1711  *
1712  * Note that this can be called while the QP is actively sending or
1713  * receiving!
1714  *
1715  * Return: 0 on success.
1716  */
1717 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1718 {
1719         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1720         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1721 
1722         rvt_reset_qp(rdi, qp, ibqp->qp_type);
1723 
1724         wait_event(qp->wait, !atomic_read(&qp->refcount));
1725         /* qpn is now available for use again */
1726         rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1727 
1728         spin_lock(&rdi->n_qps_lock);
1729         rdi->n_qps_allocated--;
1730         if (qp->ibqp.qp_type == IB_QPT_RC) {
1731                 rdi->n_rc_qps--;
1732                 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1733         }
1734         spin_unlock(&rdi->n_qps_lock);
1735 
1736         if (qp->ip)
1737                 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1738         kvfree(qp->r_rq.kwq);
1739         rdi->driver_f.qp_priv_free(rdi, qp);
1740         kfree(qp->s_ack_queue);
1741         rdma_destroy_ah_attr(&qp->remote_ah_attr);
1742         rdma_destroy_ah_attr(&qp->alt_ah_attr);
1743         free_ud_wq_attr(qp);
1744         vfree(qp->s_wq);
1745         kfree(qp);
1746         return 0;
1747 }
1748 
1749 /**
1750  * rvt_query_qp - query an ibqp
1751  * @ibqp: IB qp to query
1752  * @attr: attr struct to fill in
1753  * @attr_mask: attr mask ignored
1754  * @init_attr: struct to fill in
1755  *
1756  * Return: always 0
1757  */
1758 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1759                  int attr_mask, struct ib_qp_init_attr *init_attr)
1760 {
1761         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1762         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1763 
1764         attr->qp_state = qp->state;
1765         attr->cur_qp_state = attr->qp_state;
1766         attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1767         attr->path_mig_state = qp->s_mig_state;
1768         attr->qkey = qp->qkey;
1769         attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1770         attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1771         attr->dest_qp_num = qp->remote_qpn;
1772         attr->qp_access_flags = qp->qp_access_flags;
1773         attr->cap.max_send_wr = qp->s_size - 1 -
1774                 rdi->dparms.reserved_operations;
1775         attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1776         attr->cap.max_send_sge = qp->s_max_sge;
1777         attr->cap.max_recv_sge = qp->r_rq.max_sge;
1778         attr->cap.max_inline_data = 0;
1779         attr->ah_attr = qp->remote_ah_attr;
1780         attr->alt_ah_attr = qp->alt_ah_attr;
1781         attr->pkey_index = qp->s_pkey_index;
1782         attr->alt_pkey_index = qp->s_alt_pkey_index;
1783         attr->en_sqd_async_notify = 0;
1784         attr->sq_draining = qp->s_draining;
1785         attr->max_rd_atomic = qp->s_max_rd_atomic;
1786         attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1787         attr->min_rnr_timer = qp->r_min_rnr_timer;
1788         attr->port_num = qp->port_num;
1789         attr->timeout = qp->timeout;
1790         attr->retry_cnt = qp->s_retry_cnt;
1791         attr->rnr_retry = qp->s_rnr_retry_cnt;
1792         attr->alt_port_num =
1793                 rdma_ah_get_port_num(&qp->alt_ah_attr);
1794         attr->alt_timeout = qp->alt_timeout;
1795 
1796         init_attr->event_handler = qp->ibqp.event_handler;
1797         init_attr->qp_context = qp->ibqp.qp_context;
1798         init_attr->send_cq = qp->ibqp.send_cq;
1799         init_attr->recv_cq = qp->ibqp.recv_cq;
1800         init_attr->srq = qp->ibqp.srq;
1801         init_attr->cap = attr->cap;
1802         if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1803                 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1804         else
1805                 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1806         init_attr->qp_type = qp->ibqp.qp_type;
1807         init_attr->port_num = qp->port_num;
1808         return 0;
1809 }
1810 
1811 /**
1812  * rvt_post_recv - post a receive on a QP
1813  * @ibqp: the QP to post the receive on
1814  * @wr: the WR to post
1815  * @bad_wr: the first bad WR is put here
1816  *
1817  * This may be called from interrupt context.
1818  *
1819  * Return: 0 on success otherwise errno
1820  */
1821 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1822                   const struct ib_recv_wr **bad_wr)
1823 {
1824         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1825         struct rvt_krwq *wq = qp->r_rq.kwq;
1826         unsigned long flags;
1827         int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1828                                 !qp->ibqp.srq;
1829 
1830         /* Check that state is OK to post receive. */
1831         if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1832                 *bad_wr = wr;
1833                 return -EINVAL;
1834         }
1835 
1836         for (; wr; wr = wr->next) {
1837                 struct rvt_rwqe *wqe;
1838                 u32 next;
1839                 int i;
1840 
1841                 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1842                         *bad_wr = wr;
1843                         return -EINVAL;
1844                 }
1845 
1846                 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1847                 next = wq->head + 1;
1848                 if (next >= qp->r_rq.size)
1849                         next = 0;
1850                 if (next == READ_ONCE(wq->tail)) {
1851                         spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1852                         *bad_wr = wr;
1853                         return -ENOMEM;
1854                 }
1855                 if (unlikely(qp_err_flush)) {
1856                         struct ib_wc wc;
1857 
1858                         memset(&wc, 0, sizeof(wc));
1859                         wc.qp = &qp->ibqp;
1860                         wc.opcode = IB_WC_RECV;
1861                         wc.wr_id = wr->wr_id;
1862                         wc.status = IB_WC_WR_FLUSH_ERR;
1863                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1864                 } else {
1865                         wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1866                         wqe->wr_id = wr->wr_id;
1867                         wqe->num_sge = wr->num_sge;
1868                         for (i = 0; i < wr->num_sge; i++) {
1869                                 wqe->sg_list[i].addr = wr->sg_list[i].addr;
1870                                 wqe->sg_list[i].length = wr->sg_list[i].length;
1871                                 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1872                         }
1873                         /*
1874                          * Make sure queue entry is written
1875                          * before the head index.
1876                          */
1877                         smp_store_release(&wq->head, next);
1878                 }
1879                 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1880         }
1881         return 0;
1882 }
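
/*
 * Example usage (an illustrative sketch; the function name, wr_id, addr,
 * and lkey below are placeholders): posting a single-SGE receive through
 * the verbs entry point, which lands in rvt_post_recv() above for an
 * rdmavt-based device.
 */
static int example_post_one_recv(struct ib_qp *ibqp, u64 addr, u32 lkey,
                                 u32 len)
{
        struct ib_sge sge = {
                .addr = addr,
                .length = len,
                .lkey = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id = 1,     /* returned in wc.wr_id on completion */
                .sg_list = &sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        return ib_post_recv(ibqp, &wr, &bad_wr);
}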
1883 
1884 /**
1885  * rvt_qp_valid_operation - validate post send wr request
1886  * @qp: the qp
1887  * @post_parms: the post send table for the driver
1888  * @wr: the work request
1889  *
1890  * The routine validates the operation based on the
1891  * validation table and returns the length of the operation
1892  * which can extend beyond the ib_send_wr.  Operation-
1893  * dependent flags key atomic operation validation.
1894  *
1895  * There is an exception for UD qps that validates the pd and
1896  * overrides the length to include the additional UD specific
1897  * length.
1898  *
1899  * Returns a negative error or the length of the work request
1900  * for building the swqe.
1901  */
1902 static inline int rvt_qp_valid_operation(
1903         struct rvt_qp *qp,
1904         const struct rvt_operation_params *post_parms,
1905         const struct ib_send_wr *wr)
1906 {
1907         int len;
1908 
1909         if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1910                 return -EINVAL;
1911         if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1912                 return -EINVAL;
1913         if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1914             ibpd_to_rvtpd(qp->ibqp.pd)->user)
1915                 return -EINVAL;
1916         if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1917             (wr->num_sge == 0 ||
1918              wr->sg_list[0].length < sizeof(u64) ||
1919              wr->sg_list[0].addr & (sizeof(u64) - 1)))
1920                 return -EINVAL;
1921         if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1922             !qp->s_max_rd_atomic)
1923                 return -EINVAL;
1924         len = post_parms[wr->opcode].length;
1925         /* UD specific */
1926         if (qp->ibqp.qp_type != IB_QPT_UC &&
1927             qp->ibqp.qp_type != IB_QPT_RC) {
1928                 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1929                         return -EINVAL;
1930                 len = sizeof(struct ib_ud_wr);
1931         }
1932         return len;
1933 }
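
/*
 * For example, an atomic such as IB_WR_ATOMIC_CMP_AND_SWP must be posted
 * with an SGE list whose first element is at least sizeof(u64) long and
 * 8-byte aligned; a shorter or misaligned SGE fails the
 * RVT_OPERATION_ATOMIC_SGE check above and is rejected with -EINVAL.
 */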
1934 
1935 /**
1936  * rvt_qp_is_avail - determine queue capacity
1937  * @qp: the qp
1938  * @rdi: the rdmavt device
1939  * @reserved_op: true if this is a reserved operation
1940  *
1941  * This assumes the s_hlock is held but the s_last
1942  * qp variable is uncontrolled.
1943  *
1944  * For non reserved operations, the qp->s_avail
1945  * may be changed.
1946  *
1947  * Return: 0 on success, otherwise -ENOMEM.
1948  */
1949 static inline int rvt_qp_is_avail(
1950         struct rvt_qp *qp,
1951         struct rvt_dev_info *rdi,
1952         bool reserved_op)
1953 {
1954         u32 slast;
1955         u32 avail;
1956         u32 reserved_used;
1957 
1958         /* see rvt_qp_wqe_unreserve() */
1959         smp_mb__before_atomic();
1960         if (unlikely(reserved_op)) {
1961                 /* see rvt_qp_wqe_unreserve() */
1962                 reserved_used = atomic_read(&qp->s_reserved_used);
1963                 if (reserved_used >= rdi->dparms.reserved_operations)
1964                         return -ENOMEM;
1965                 return 0;
1966         }
1967         /* non-reserved operations */
1968         if (likely(qp->s_avail))
1969                 return 0;
1970         /* See rvt_qp_complete_swqe() */
1971         slast = smp_load_acquire(&qp->s_last);
1972         if (qp->s_head >= slast)
1973                 avail = qp->s_size - (qp->s_head - slast);
1974         else
1975                 avail = slast - qp->s_head;
1976 
1977         reserved_used = atomic_read(&qp->s_reserved_used);
1978         avail = avail - 1 -
1979                 (rdi->dparms.reserved_operations - reserved_used);
1980         /* ensure we don't assign a negative s_avail */
1981         if ((s32)avail <= 0)
1982                 return -ENOMEM;
1983         qp->s_avail = avail;
1984         if (WARN_ON(qp->s_avail >
1985                     (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1986                 rvt_pr_err(rdi,
1987                            "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1988                            qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1989                            qp->s_head, qp->s_tail, qp->s_cur,
1990                            qp->s_acked, qp->s_last);
1991         return 0;
1992 }
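
/*
 * Worked example for the computation above: with s_size = 16,
 * s_head = 10, s_last = 4, dparms.reserved_operations = 2, and no
 * reserved entries in use, avail = 16 - (10 - 4) = 10 raw slots, and
 * then 10 - 1 - (2 - 0) = 7 entries become qp->s_avail.
 */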
1993 
1994 /**
1995  * rvt_post_one_wr - post one RC, UC, or UD send work request
1996  * @qp: the QP to post on
1997  * @wr: the work request to send
1998  */
1999 static int rvt_post_one_wr(struct rvt_qp *qp,
2000                            const struct ib_send_wr *wr,
2001                            bool *call_send)
2002 {
2003         struct rvt_swqe *wqe;
2004         u32 next;
2005         int i;
2006         int j;
2007         int acc;
2008         struct rvt_lkey_table *rkt;
2009         struct rvt_pd *pd;
2010         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2011         u8 log_pmtu;
2012         int ret;
2013         size_t cplen;
2014         bool reserved_op;
2015         int local_ops_delayed = 0;
2016 
2017         BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2018 
2019         /* IB spec says that num_sge == 0 is OK. */
2020         if (unlikely(wr->num_sge > qp->s_max_sge))
2021                 return -EINVAL;
2022 
2023         ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2024         if (ret < 0)
2025                 return ret;
2026         cplen = ret;
2027 
2028         /*
2029          * Local operations include fast register and local invalidate.
2030          * Fast register needs to be processed immediately because the
2031          * registered lkey may be used by following work requests and the
2032          * lkey needs to be valid at the time those requests are posted.
2033          * Local invalidate can be processed immediately if fencing is
2034          * not required and no previous local invalidate ops are pending.
2035          * Signaled local operations that have been processed immediately
2036          * need to have requests with "completion only" flags set posted
2037          * to the send queue in order to generate completions.
2038          */
2039         if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2040                 switch (wr->opcode) {
2041                 case IB_WR_REG_MR:
2042                         ret = rvt_fast_reg_mr(qp,
2043                                               reg_wr(wr)->mr,
2044                                               reg_wr(wr)->key,
2045                                               reg_wr(wr)->access);
2046                         if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2047                                 return ret;
2048                         break;
2049                 case IB_WR_LOCAL_INV:
2050                         if ((wr->send_flags & IB_SEND_FENCE) ||
2051                             atomic_read(&qp->local_ops_pending)) {
2052                                 local_ops_delayed = 1;
2053                         } else {
2054                                 ret = rvt_invalidate_rkey(
2055                                         qp, wr->ex.invalidate_rkey);
2056                                 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2057                                         return ret;
2058                         }
2059                         break;
2060                 default:
2061                         return -EINVAL;
2062                 }
2063         }
2064 
2065         reserved_op = rdi->post_parms[wr->opcode].flags &
2066                         RVT_OPERATION_USE_RESERVE;
2067         /* check for avail */
2068         ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2069         if (ret)
2070                 return ret;
2071         next = qp->s_head + 1;
2072         if (next >= qp->s_size)
2073                 next = 0;
2074 
2075         rkt = &rdi->lkey_table;
2076         pd = ibpd_to_rvtpd(qp->ibqp.pd);
2077         wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2078 
2079         /* cplen has length from above */
2080         memcpy(&wqe->wr, wr, cplen);
2081 
2082         wqe->length = 0;
2083         j = 0;
2084         if (wr->num_sge) {
2085                 struct rvt_sge *last_sge = NULL;
2086 
2087                 acc = wr->opcode >= IB_WR_RDMA_READ ?
2088                         IB_ACCESS_LOCAL_WRITE : 0;
2089                 for (i = 0; i < wr->num_sge; i++) {
2090                         u32 length = wr->sg_list[i].length;
2091 
2092                         if (length == 0)
2093                                 continue;
2094                         ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2095                                           &wr->sg_list[i], acc);
2096                         if (unlikely(ret < 0))
2097                                 goto bail_inval_free;
2098                         wqe->length += length;
2099                         if (ret)
2100                                 last_sge = &wqe->sg_list[j];
2101                         j += ret;
2102                 }
2103                 wqe->wr.num_sge = j;
2104         }
2105 
2106         /*
2107          * Calculate and set SWQE PSN values prior to handing it off
2108          * to the driver's check routine. This gives the driver the
2109          * opportunity to adjust PSN values based on internal checks.
2110          */
2111         log_pmtu = qp->log_pmtu;
2112         if (qp->allowed_ops == IB_OPCODE_UD) {
2113                 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2114 
2115                 log_pmtu = ah->log_pmtu;
2116                 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2117         }
2118 
2119         if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2120                 if (local_ops_delayed)
2121                         atomic_inc(&qp->local_ops_pending);
2122                 else
2123                         wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2124                 wqe->ssn = 0;
2125                 wqe->psn = 0;
2126                 wqe->lpsn = 0;
2127         } else {
2128                 wqe->ssn = qp->s_ssn++;
2129                 wqe->psn = qp->s_next_psn;
2130                 wqe->lpsn = wqe->psn +
2131                                 (wqe->length ?
2132                                         ((wqe->length - 1) >> log_pmtu) :
2133                                         0);
2134         }
2135 
2136         /* general part of wqe valid - allow for driver checks */
2137         if (rdi->driver_f.setup_wqe) {
2138                 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2139                 if (ret < 0)
2140                         goto bail_inval_free_ref;
2141         }
2142 
2143         if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2144                 qp->s_next_psn = wqe->lpsn + 1;
2145 
2146         if (unlikely(reserved_op)) {
2147                 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2148                 rvt_qp_wqe_reserve(qp, wqe);
2149         } else {
2150                 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2151                 qp->s_avail--;
2152         }
2153         trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2154         smp_wmb(); /* see request builders */
2155         qp->s_head = next;
2156 
2157         return 0;
2158 
2159 bail_inval_free_ref:
2160         if (qp->allowed_ops == IB_OPCODE_UD)
2161                 rdma_destroy_ah_attr(wqe->ud_wr.attr);
2162 bail_inval_free:
2163         /* release mr holds */
2164         while (j) {
2165                 struct rvt_sge *sge = &wqe->sg_list[--j];
2166 
2167                 rvt_put_mr(sge->mr);
2168         }
2169         return ret;
2170 }
2171 
2172 /**
2173  * rvt_post_send - post a send on a QP
2174  * @ibqp: the QP to post the send on
2175  * @wr: the list of work requests to post
2176  * @bad_wr: the first bad WR is put here
2177  *
2178  * This may be called from interrupt context.
2179  *
2180  * Return: 0 on success else errno
2181  */
2182 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2183                   const struct ib_send_wr **bad_wr)
2184 {
2185         struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2186         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2187         unsigned long flags = 0;
2188         bool call_send;
2189         unsigned nreq = 0;
2190         int err = 0;
2191 
2192         spin_lock_irqsave(&qp->s_hlock, flags);
2193 
2194         /*
2195          * Ensure QP state is such that we can send. If not, bail out early;
2196          * there is no need to do this every time we post a send.
2197          */
2198         if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2199                 spin_unlock_irqrestore(&qp->s_hlock, flags);
2200                 return -EINVAL;
2201         }
2202 
2203         /*
2204          * If the send queue is empty, and we only have a single WR then just go
2205          * ahead and kick the send engine into gear. Otherwise we will always
2206          * just schedule the send to happen later.
2207          */
2208         call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2209 
2210         for (; wr; wr = wr->next) {
2211                 err = rvt_post_one_wr(qp, wr, &call_send);
2212                 if (unlikely(err)) {
2213                         *bad_wr = wr;
2214                         goto bail;
2215                 }
2216                 nreq++;
2217         }
2218 bail:
2219         spin_unlock_irqrestore(&qp->s_hlock, flags);
2220         if (nreq) {
2221                 /*
2222                  * Only call do_send if there is exactly one packet, and the
2223                  * driver said it was ok.
2224                  */
2225                 if (nreq == 1 && call_send)
2226                         rdi->driver_f.do_send(qp);
2227                 else
2228                         rdi->driver_f.schedule_send_no_lock(qp);
2229         }
2230         return err;
2231 }
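
/*
 * Example usage (an illustrative sketch; the function name, wr_id, addr,
 * and lkey below are placeholders): a signaled single-SGE send posted
 * through the verbs entry point, which lands in rvt_post_send() above
 * for an rdmavt-based device.  With an otherwise empty send queue and a
 * single WR, this takes the call_send fast path.
 */
static int example_post_one_send(struct ib_qp *ibqp, u64 addr, u32 lkey,
                                 u32 len)
{
        struct ib_sge sge = {
                .addr = addr,
                .length = len,
                .lkey = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id = 2,
                .sg_list = &sge,
                .num_sge = 1,
                .opcode = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(ibqp, &wr, &bad_wr);
}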
2232 
2233 /**
2234  * rvt_post_srq_recv - post a receive on a shared receive queue
2235  * @ibsrq: the SRQ to post the receive on
2236  * @wr: the list of work requests to post
2237  * @bad_wr: A pointer to the first WR to cause a problem is put here
2238  *
2239  * This may be called from interrupt context.
2240  *
2241  * Return: 0 on success else errno
2242  */
2243 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2244                       const struct ib_recv_wr **bad_wr)
2245 {
2246         struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2247         struct rvt_krwq *wq;
2248         unsigned long flags;
2249 
2250         for (; wr; wr = wr->next) {
2251                 struct rvt_rwqe *wqe;
2252                 u32 next;
2253                 int i;
2254 
2255                 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2256                         *bad_wr = wr;
2257                         return -EINVAL;
2258                 }
2259 
2260                 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2261                 wq = srq->rq.kwq;
2262                 next = wq->head + 1;
2263                 if (next >= srq->rq.size)
2264                         next = 0;
2265                 if (next == READ_ONCE(wq->tail)) {
2266                         spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2267                         *bad_wr = wr;
2268                         return -ENOMEM;
2269                 }
2270 
2271                 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2272                 wqe->wr_id = wr->wr_id;
2273                 wqe->num_sge = wr->num_sge;
2274                 for (i = 0; i < wr->num_sge; i++) {
2275                         wqe->sg_list[i].addr = wr->sg_list[i].addr;
2276                         wqe->sg_list[i].length = wr->sg_list[i].length;
2277                         wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2278                 }
2279                 /* Make sure queue entry is written before the head index. */
2280                 smp_store_release(&wq->head, next);
2281                 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2282         }
2283         return 0;
2284 }
2285 
2286 /*
2287  * rvt uses the internal kernel struct as part of its ABI; for now, make sure
2288  * the kernel struct does not change layout. FIXME: rvt should never cast the
2289  * user struct to a kernel struct.
2290  */
2291 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2292 {
2293         BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2294                      offsetof(struct rvt_wqe_sge, addr));
2295         BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2296                      offsetof(struct rvt_wqe_sge, length));
2297         BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2298                      offsetof(struct rvt_wqe_sge, lkey));
2299         return (struct ib_sge *)sge;
2300 }
2301 
2302 /*
2303  * Validate a RWQE and fill in the SGE state.
2304  * Return 1 if OK.
2305  */
2306 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2307 {
2308         int i, j, ret;
2309         struct ib_wc wc;
2310         struct rvt_lkey_table *rkt;
2311         struct rvt_pd *pd;
2312         struct rvt_sge_state *ss;
2313         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2314 
2315         rkt = &rdi->lkey_table;
2316         pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2317         ss = &qp->r_sge;
2318         ss->sg_list = qp->r_sg_list;
2319         qp->r_len = 0;
2320         for (i = j = 0; i < wqe->num_sge; i++) {
2321                 if (wqe->sg_list[i].length == 0)
2322                         continue;
2323                 /* Check LKEY */
2324                 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2325                                   NULL, rvt_cast_sge(&wqe->sg_list[i]),
2326                                   IB_ACCESS_LOCAL_WRITE);
2327                 if (unlikely(ret <= 0))
2328                         goto bad_lkey;
2329                 qp->r_len += wqe->sg_list[i].length;
2330                 j++;
2331         }
2332         ss->num_sge = j;
2333         ss->total_len = qp->r_len;
2334         return 1;
2335 
2336 bad_lkey:
2337         while (j) {
2338                 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2339 
2340                 rvt_put_mr(sge->mr);
2341         }
2342         ss->num_sge = 0;
2343         memset(&wc, 0, sizeof(wc));
2344         wc.wr_id = wqe->wr_id;
2345         wc.status = IB_WC_LOC_PROT_ERR;
2346         wc.opcode = IB_WC_RECV;
2347         wc.qp = &qp->ibqp;
2348         /* Signal solicited completion event. */
2349         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2350         return 0;
2351 }
2352 
2353 /**
2354  * get_count - count the number of request work queue entries
2355  * in the circular buffer
2356  * @rq: data structure for the request queue entry
2357  * @tail: tail index of the circular buffer
2358  * @head: head index of the circular buffer
2359  *
2360  * Return: total number of entries in the circular buffer
2361  */
2362 static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
2363 {
2364         u32 count;
2365 
2366         count = head;
2367 
2368         if (count >= rq->size)
2369                 count = 0;
2370         if (count < tail)
2371                 count += rq->size - tail;
2372         else
2373                 count -= tail;
2374 
2375         return count;
2376 }
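
/*
 * Worked example: with rq->size = 8, tail = 6, and head = 2, the buffer
 * has wrapped, so count = 2 + (8 - 6) = 4 entries; with tail = 2 and
 * head = 6 it has not wrapped and count = 6 - 2 = 4.
 */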
2377 
2378 /**
2379  * get_rvt_head - get the head index of the circular buffer
2380  * @rq: data structure for the request queue entry
2381  * @ip: mmap info pointer; non-NULL when the queue is user-mapped
2382  *
2383  * Return: head index value
2384  */
2385 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2386 {
2387         u32 head;
2388 
2389         if (ip)
2390                 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2391         else
2392                 head = rq->kwq->head;
2393 
2394         return head;
2395 }
2396 
2397 /**
2398  * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2399  * @qp: the QP
2400  * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2401  *
2402  * Return -1 if there is a local error, 0 if no RWQE is available,
2403  * otherwise return 1.
2404  *
2405  * Can be called from interrupt level.
2406  */
2407 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2408 {
2409         unsigned long flags;
2410         struct rvt_rq *rq;
2411         struct rvt_krwq *kwq = NULL;
2412         struct rvt_rwq *wq;
2413         struct rvt_srq *srq;
2414         struct rvt_rwqe *wqe;
2415         void (*handler)(struct ib_event *, void *);
2416         u32 tail;
2417         u32 head;
2418         int ret;
2419         void *ip = NULL;
2420 
2421         if (qp->ibqp.srq) {
2422                 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2423                 handler = srq->ibsrq.event_handler;
2424                 rq = &srq->rq;
2425                 ip = srq->ip;
2426         } else {
2427                 srq = NULL;
2428                 handler = NULL;
2429                 rq = &qp->r_rq;
2430                 ip = qp->ip;
2431         }
2432 
2433         spin_lock_irqsave(&rq->kwq->c_lock, flags);
2434         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2435                 ret = 0;
2436                 goto unlock;
2437         }
2438         kwq = rq->kwq;
2439         if (ip) {
2440                 wq = rq->wq;
2441                 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2442         } else {
2443                 tail = kwq->tail;
2444         }
2445 
2446         /* Validate tail before using it since it is user writable. */
2447         if (tail >= rq->size)
2448                 tail = 0;
2449 
2450         if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2451                 head = get_rvt_head(rq, ip);
2452                 kwq->count = get_count(rq, tail, head);
2453         }
2454         if (unlikely(kwq->count == 0)) {
2455                 ret = 0;
2456                 goto unlock;
2457         }
2458         /* Make sure entry is read after the count is read. */
2459         smp_rmb();
2460         wqe = rvt_get_rwqe_ptr(rq, tail);
2461         /*
2462          * Even though we update the tail index in memory, the verbs
2463          * consumer is not supposed to post more entries until a
2464          * completion is generated.
2465          */
2466         if (++tail >= rq->size)
2467                 tail = 0;
2468         if (ip)
2469                 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2470         else
2471                 kwq->tail = tail;
2472         if (!wr_id_only && !init_sge(qp, wqe)) {
2473                 ret = -1;
2474                 goto unlock;
2475         }
2476         qp->r_wr_id = wqe->wr_id;
2477 
2478         kwq->count--;
2479         ret = 1;
2480         set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2481         if (handler) {
2482                 /*
2483                  * Validate head pointer value and compute
2484                  * the number of remaining WQEs.
2485                  */
2486                 if (kwq->count < srq->limit) {
2487                         kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
2488                         if (kwq->count < srq->limit) {
2489                                 struct ib_event ev;
2490 
2491                                 srq->limit = 0;
2492                                 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2493                                 ev.device = qp->ibqp.device;
2494                                 ev.element.srq = qp->ibqp.srq;
2495                                 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2496                                 handler(&ev, srq->ibsrq.srq_context);
2497                                 goto bail;
2498                         }
2499                 }
2500         }
2501 unlock:
2502         spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2503 bail:
2504         return ret;
2505 }
2506 EXPORT_SYMBOL(rvt_get_rwqe);
2507 
2508 /**
2509  * rvt_comm_est - handle trap with QP established
2510  * @qp: the QP
2511  */
2512 void rvt_comm_est(struct rvt_qp *qp)
2513 {
2514         qp->r_flags |= RVT_R_COMM_EST;
2515         if (qp->ibqp.event_handler) {
2516                 struct ib_event ev;
2517 
2518                 ev.device = qp->ibqp.device;
2519                 ev.element.qp = &qp->ibqp;
2520                 ev.event = IB_EVENT_COMM_EST;
2521                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2522         }
2523 }
2524 EXPORT_SYMBOL(rvt_comm_est);
2525 
2526 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2527 {
2528         unsigned long flags;
2529         int lastwqe;
2530 
2531         spin_lock_irqsave(&qp->s_lock, flags);
2532         lastwqe = rvt_error_qp(qp, err);
2533         spin_unlock_irqrestore(&qp->s_lock, flags);
2534 
2535         if (lastwqe) {
2536                 struct ib_event ev;
2537 
2538                 ev.device = qp->ibqp.device;
2539                 ev.element.qp = &qp->ibqp;
2540                 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2541                 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2542         }
2543 }
2544 EXPORT_SYMBOL(rvt_rc_error);
2545 
2546 /*
2547  *  rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
2548  *  @index - the index into ib_rvt_rnr_table
2549  *  return the usec value for the given index
2550  */
2551 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2552 {
2553         return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2554 }
2555 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2556 
2557 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2558 {
2559         return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2560                                   IB_AETH_CREDIT_MASK];
2561 }
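
/*
 * For illustration, assuming the usual IB AETH layout (an
 * IB_AETH_CREDIT_SHIFT of 24 and a 5-bit IB_AETH_CREDIT_MASK), an aeth
 * of 0x0e000000 decodes to index 14, so the RNR delay used is
 * ib_rvt_rnr_table[14] usec.
 */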
2562 
2563 /*
2564  *  rvt_add_retry_timer_ext - add/start a retry timer
2565  *  @qp - the QP
2566  *  @shift - timeout shift to wait for multiple packets
2567  *  add a retry timer on the QP
2568  */
2569 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2570 {
2571         struct ib_qp *ibqp = &qp->ibqp;
2572         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2573 
2574         lockdep_assert_held(&qp->s_lock);
2575         qp->s_flags |= RVT_S_TIMER;
2576         /* 4.096 usec. * (1 << qp->timeout) */
2577         qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2578                               (qp->timeout_jiffies << shift);
2579         add_timer(&qp->s_timer);
2580 }
2581 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
2582 
2583 /**
2584  * rvt_add_rnr_timer - add/start an rnr timer
2585  * @qp - the QP
2586  * @aeth - aeth of RNR timeout, simulated aeth for loopback
2587  * add an rnr timer on the QP
2588  */
2589 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2590 {
2591         u32 to;
2592 
2593         lockdep_assert_held(&qp->s_lock);
2594         qp->s_flags |= RVT_S_WAIT_RNR;
2595         to = rvt_aeth_to_usec(aeth);
2596         trace_rvt_rnrnak_add(qp, to);
2597         hrtimer_start(&qp->s_rnr_timer,
2598                       ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2599 }
2600 EXPORT_SYMBOL(rvt_add_rnr_timer);
2601 
2602 /**
2603  * rvt_stop_rc_timers - stop all timers
2604  * @qp - the QP
2605  * stop any pending timers
2606  */
2607 void rvt_stop_rc_timers(struct rvt_qp *qp)
2608 {
2609         lockdep_assert_held(&qp->s_lock);
2610         /* Remove QP from all timers */
2611         if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2612                 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2613                 del_timer(&qp->s_timer);
2614                 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2615         }
2616 }
2617 EXPORT_SYMBOL(rvt_stop_rc_timers);
2618 
2619 /**
2620  * rvt_stop_rnr_timer - stop an rnr timer
2621  * @qp - the QP
2622  *
2623  * stop the rnr timer if it is
2624  * currently pending.
2625  */
2626 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2627 {
2628         lockdep_assert_held(&qp->s_lock);
2629         /* Remove QP from rnr timer */
2630         if (qp->s_flags & RVT_S_WAIT_RNR) {
2631                 qp->s_flags &= ~RVT_S_WAIT_RNR;
2632                 trace_rvt_rnrnak_stop(qp, 0);
2633         }
2634 }
2635 
2636 /**
2637  * rvt_del_timers_sync - wait for any timeout routines to exit
2638  * @qp - the QP
2639  */
2640 void rvt_del_timers_sync(struct rvt_qp *qp)
2641 {
2642         del_timer_sync(&qp->s_timer);
2643         hrtimer_cancel(&qp->s_rnr_timer);
2644 }
2645 EXPORT_SYMBOL(rvt_del_timers_sync);
2646 
2647 /*
2648  * This is called from s_timer for missing responses.
2649  */
2650 static void rvt_rc_timeout(struct timer_list *t)
2651 {
2652         struct rvt_qp *qp = from_timer(qp, t, s_timer);
2653         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2654         unsigned long flags;
2655 
2656         spin_lock_irqsave(&qp->r_lock, flags);
2657         spin_lock(&qp->s_lock);
2658         if (qp->s_flags & RVT_S_TIMER) {
2659                 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2660 
2661                 qp->s_flags &= ~RVT_S_TIMER;
2662                 rvp->n_rc_timeouts++;
2663                 del_timer(&qp->s_timer);
2664                 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2665                 if (rdi->driver_f.notify_restart_rc)
2666                         rdi->driver_f.notify_restart_rc(qp,
2667                                                         qp->s_last_psn + 1,
2668                                                         1);
2669                 rdi->driver_f.schedule_send(qp);
2670         }
2671         spin_unlock(&qp->s_lock);
2672         spin_unlock_irqrestore(&qp->r_lock, flags);
2673 }
2674 
2675 /*
2676  * This is called from s_timer for RNR timeouts.
2677  */
2678 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2679 {
2680         struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2681         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2682         unsigned long flags;
2683 
2684         spin_lock_irqsave(&qp->s_lock, flags);
2685         rvt_stop_rnr_timer(qp);
2686         trace_rvt_rnrnak_timeout(qp, 0);
2687         rdi->driver_f.schedule_send(qp);
2688         spin_unlock_irqrestore(&qp->s_lock, flags);
2689         return HRTIMER_NORESTART;
2690 }
2691 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2692 
2693 /**
2694  * rvt_qp_iter_init - initialize an iterator for QP iteration
2695  * @rdi: rvt devinfo
2696  * @v: u64 value passed to the user-defined callback @cb
2697  *
2698  * This returns an iterator suitable for iterating QPs
2699  * in the system.
2700  *
2701  * The @cb is a user defined callback and @v is a 64
2702  * bit value passed to and relevant for processing in the
2703  * @cb.  An example use case would be to alter QP processing
2704  * based on criteria not part of the rvt_qp.
2705  *
2706  * Use cases that require memory allocation to succeed
2707  * must preallocate appropriately.
2708  *
2709  * Return: a pointer to an rvt_qp_iter or NULL
2710  */
2711 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2712                                      u64 v,
2713                                      void (*cb)(struct rvt_qp *qp, u64 v))
2714 {
2715         struct rvt_qp_iter *i;
2716 
2717         i = kzalloc(sizeof(*i), GFP_KERNEL);
2718         if (!i)
2719                 return NULL;
2720 
2721         i->rdi = rdi;
2722         /* number of special QPs (SMI/GSI) for device */
2723         i->specials = rdi->ibdev.phys_port_cnt * 2;
2724         i->v = v;
2725         i->cb = cb;
2726 
2727         return i;
2728 }
2729 EXPORT_SYMBOL(rvt_qp_iter_init);
2730 
2731 /**
2732  * rvt_qp_iter_next - return the next QP in iter
2733  * @iter - the iterator
2734  *
2735  * Fine grained QP iterator suitable for use
2736  * with debugfs seq_file mechanisms.
2737  *
2738  * Updates iter->qp with the current QP when the return
2739  * value is 0.
2740  *
2741  * Return: 0 - iter->qp is valid 1 - no more QPs
2742  */
2743 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2744         __must_hold(RCU)
2745 {
2746         int n = iter->n;
2747         int ret = 1;
2748         struct rvt_qp *pqp = iter->qp;
2749         struct rvt_qp *qp;
2750         struct rvt_dev_info *rdi = iter->rdi;
2751 
2752         /*
2753          * The approach is to consider the special qps
2754          * as additional table entries before the
2755          * real hash table.  Since the qp code sets
2756          * the qp->next hash link to NULL, this works just fine.
2757          *
2758          * iter->specials is 2 * # ports
2759          *
2760          * n = 0..iter->specials is the special qp indices
2761          *
2762          * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2763          * the potential hash bucket entries
2764          *
2765          */
2766         for (; n <  rdi->qp_dev->qp_table_size + iter->specials; n++) {
2767                 if (pqp) {
2768                         qp = rcu_dereference(pqp->next);
2769                 } else {
2770                         if (n < iter->specials) {
2771                                 struct rvt_ibport *rvp;
2772                                 int pidx;
2773 
2774                                 pidx = n % rdi->ibdev.phys_port_cnt;
2775                                 rvp = rdi->ports[pidx];
2776                                 qp = rcu_dereference(rvp->qp[n & 1]);
2777                         } else {
2778                                 qp = rcu_dereference(
2779                                         rdi->qp_dev->qp_table[
2780                                                 (n - iter->specials)]);
2781                         }
2782                 }
2783                 pqp = qp;
2784                 if (qp) {
2785                         iter->qp = qp;
2786                         iter->n = n;
2787                         return 0;
2788                 }
2789         }
2790         return ret;
2791 }
2792 EXPORT_SYMBOL(rvt_qp_iter_next);
2793 
2794 /**
2795  * rvt_qp_iter - iterate all QPs
2796  * @rdi - rvt devinfo
2797  * @v - a 64 bit value
2798  * @cb - a callback
2799  *
2800  * This provides a way for iterating all QPs.
2801  *
2802  * The @cb is a user defined callback and @v is a 64
2803  * bit value passed to and relevant for processing in the
2804  * cb.  An example use case would be to alter QP processing
2805  * based on criteria not part of the rvt_qp.
2806  *
2807  * The code has an internal iterator to simplify
2808  * non seq_file use cases.
2809  */
2810 void rvt_qp_iter(struct rvt_dev_info *rdi,
2811                  u64 v,
2812                  void (*cb)(struct rvt_qp *qp, u64 v))
2813 {
2814         int ret;
2815         struct rvt_qp_iter i = {
2816                 .rdi = rdi,
2817                 .specials = rdi->ibdev.phys_port_cnt * 2,
2818                 .v = v,
2819                 .cb = cb
2820         };
2821 
2822         rcu_read_lock();
2823         do {
2824                 ret = rvt_qp_iter_next(&i);
2825                 if (!ret) {
2826                         rvt_get_qp(i.qp);
2827                         rcu_read_unlock();
2828                         i.cb(i.qp, i.v);
2829                         rcu_read_lock();
2830                         rvt_put_qp(i.qp);
2831                 }
2832         } while (!ret);
2833         rcu_read_unlock();
2834 }
2835 EXPORT_SYMBOL(rvt_qp_iter);
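
/*
 * Example usage (an illustrative sketch; the function and variable names
 * below are placeholders): counting RC QPs with rvt_qp_iter().  The
 * iterator holds a reference around each callback (see rvt_get_qp() /
 * rvt_put_qp() above), and @v carries a pointer to the caller's counter.
 */
static void example_count_rc_cb(struct rvt_qp *qp, u64 v)
{
        atomic_t *n_rc = (atomic_t *)(uintptr_t)v;

        if (qp->ibqp.qp_type == IB_QPT_RC)
                atomic_inc(n_rc);
}

static u32 example_count_rc_qps(struct rvt_dev_info *rdi)
{
        atomic_t n_rc = ATOMIC_INIT(0);

        rvt_qp_iter(rdi, (u64)(uintptr_t)&n_rc, example_count_rc_cb);
        return atomic_read(&n_rc);
}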
2836 
2837 /*
2838  * This should be called with s_lock held.
2839  */
2840 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2841                        enum ib_wc_status status)
2842 {
2843         u32 old_last, last;
2844         struct rvt_dev_info *rdi;
2845 
2846         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2847                 return;
2848         rdi = ib_to_rvt(qp->ibqp.device);
2849 
2850         old_last = qp->s_last;
2851         trace_rvt_qp_send_completion(qp, wqe, old_last);
2852         last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2853                                     status);
2854         if (qp->s_acked == old_last)
2855                 qp->s_acked = last;
2856         if (qp->s_cur == old_last)
2857                 qp->s_cur = last;
2858         if (qp->s_tail == old_last)
2859                 qp->s_tail = last;
2860         if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2861                 qp->s_draining = 0;
2862 }
2863 EXPORT_SYMBOL(rvt_send_complete);
2864 
2865 /**
2866  * rvt_copy_sge - copy data to SGE memory
2867  * @qp: associated QP
2868  * @ss: the SGE state
2869  * @data: the data to copy
2870  * @length: the length of the data
2871  * @release: boolean to release MR
2872  * @copy_last: do a separate copy of the last 8 bytes
2873  */
2874 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2875                   void *data, u32 length,
2876                   bool release, bool copy_last)
2877 {
2878         struct rvt_sge *sge = &ss->sge;
2879         int i;
2880         bool in_last = false;
2881         bool cacheless_copy = false;
2882         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2883         struct rvt_wss *wss = rdi->wss;
2884         unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2885 
2886         if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2887                 cacheless_copy = length >= PAGE_SIZE;
2888         } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2889                 if (length >= PAGE_SIZE) {
2890                         /*
2891                          * NOTE: this *assumes*:
2892                          * o The first vaddr is the dest.
2893                          * o If multiple pages, then vaddr is sequential.
2894                          */
2895                         wss_insert(wss, sge->vaddr);
2896                         if (length >= (2 * PAGE_SIZE))
2897                                 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2898 
2899                         cacheless_copy = wss_exceeds_threshold(wss);
2900                 } else {
2901                         wss_advance_clean_counter(wss);
2902                 }
2903         }
2904 
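        /*
         * If an ordered copy of the final 8 bytes was requested, peel
         * them off the bulk length so they are written byte-by-byte
         * after the rest; transfers of 8 bytes or less are done
         * entirely in ordered byte mode.
         */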
2905         if (copy_last) {
2906                 if (length > 8) {
2907                         length -= 8;
2908                 } else {
2909                         copy_last = false;
2910                         in_last = true;
2911                 }
2912         }
2913 
2914 again:
2915         while (length) {
2916                 u32 len = rvt_get_sge_length(sge, length);
2917 
2918                 WARN_ON_ONCE(len == 0);
2919                 if (unlikely(in_last)) {
2920                         /* enforce byte transfer ordering */
2921                         for (i = 0; i < len; i++)
2922                                 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2923                 } else if (cacheless_copy) {
2924                         cacheless_memcpy(sge->vaddr, data, len);
2925                 } else {
2926                         memcpy(sge->vaddr, data, len);
2927                 }
2928                 rvt_update_sge(ss, len, release);
2929                 data += len;
2930                 length -= len;
2931         }
2932 
2933         if (copy_last) {
2934                 copy_last = false;
2935                 in_last = true;
2936                 length = 8;
2937                 goto again;
2938         }
2939 }
2940 EXPORT_SYMBOL(rvt_copy_sge);
2941 
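/*
 * Usage sketch (editorial, hedged): delivering an inbound payload into
 * the receive SGE state, roughly as a responder would after a
 * successful rvt_get_rwqe().  The function name and payload source are
 * illustrative assumptions.
 */
#if 0
static void example_deliver_payload(struct rvt_qp *qp, void *payload,
                                    u32 len)
{
        /* release MR references as SGEs complete; no ordered tail copy */
        rvt_copy_sge(qp, &qp->r_sge, payload, len, true, false);
}
#endif
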
2942 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2943                                           struct rvt_qp *sqp)
2944 {
2945         rvp->n_pkt_drops++;
2946         /*
2947          * For RC, the requester would timeout and retry so
2948          * shortcut the timeouts and just signal too many retries.
2949          */
2950         return sqp->ibqp.qp_type == IB_QPT_RC ?
2951                 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2952 }
2953 
2954 /**
2955  * rvt_ruc_loopback - handle UC and RC loopback requests
2956  * @sqp: the sending QP
2957  *
2958  * This is called from rvt_do_send() to forward a WQE addressed to the same
2959  * HFI.  Note that although we are single threaded due to the send engine, we
2960  * still have to protect against post_send().  We don't have to worry about
2961  * receive interrupts since this is a connected protocol and all packets
2962  * will pass through here.
2963  */
2964 void rvt_ruc_loopback(struct rvt_qp *sqp)
2965 {
2966         struct rvt_ibport *rvp = NULL;
2967         struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2968         struct rvt_qp *qp;
2969         struct rvt_swqe *wqe;
2970         struct rvt_sge *sge;
2971         unsigned long flags;
2972         struct ib_wc wc;
2973         u64 sdata;
2974         atomic64_t *maddr;
2975         enum ib_wc_status send_status;
2976         bool release;
2977         int ret;
2978         bool copy_last = false;
2979         int local_ops = 0;
2980 
2981         rcu_read_lock();
2982         rvp = rdi->ports[sqp->port_num - 1];
2983 
2984         /*
2985          * Note that we check the responder QP state after
2986          * checking the requester's state.
2987          */
2988 
2989         qp = rvt_lookup_qpn(rdi, rvp, sqp->remote_qpn);
2991 
2992         spin_lock_irqsave(&sqp->s_lock, flags);
2993 
2994         /* Return if we are already busy processing a work request. */
2995         if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2996             !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2997                 goto unlock;
2998 
2999         sqp->s_flags |= RVT_S_BUSY;
3000 
3001 again:
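        /* the send queue is empty once s_last catches up to s_head */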
3002         if (sqp->s_last == READ_ONCE(sqp->s_head))
3003                 goto clr_busy;
3004         wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
3005 
3006         /* Return if it is not OK to start a new work request. */
3007         if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
3008                 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
3009                         goto clr_busy;
3010                 /* We are in the error state, flush the work request. */
3011                 send_status = IB_WC_WR_FLUSH_ERR;
3012                 goto flush_send;
3013         }
3014 
3015         /*
3016          * We can rely on the entry not changing without the s_lock
3017          * being held until we update s_last.
3018          * We increment s_cur to indicate s_last is in progress.
3019          */
3020         if (sqp->s_last == sqp->s_cur) {
3021                 if (++sqp->s_cur >= sqp->s_size)
3022                         sqp->s_cur = 0;
3023         }
3024         spin_unlock_irqrestore(&sqp->s_lock, flags);
3025 
3026         if (!qp) {
3027                 send_status = loopback_qp_drop(rvp, sqp);
3028                 goto serr_no_r_lock;
3029         }
3030         spin_lock_irqsave(&qp->r_lock, flags);
3031         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3032             qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3033                 send_status = loopback_qp_drop(rvp, sqp);
3034                 goto serr;
3035         }
3036 
3037         memset(&wc, 0, sizeof(wc));
3038         send_status = IB_WC_SUCCESS;
3039 
3040         release = true;
3041         sqp->s_sge.sge = wqe->sg_list[0];
3042         sqp->s_sge.sg_list = wqe->sg_list + 1;
3043         sqp->s_sge.num_sge = wqe->wr.num_sge;
3044         sqp->s_len = wqe->length;
3045         switch (wqe->wr.opcode) {
3046         case IB_WR_REG_MR:
3047                 goto send_comp;
3048 
3049         case IB_WR_LOCAL_INV:
3050                 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3051                         if (rvt_invalidate_rkey(sqp,
3052                                                 wqe->wr.ex.invalidate_rkey))
3053                                 send_status = IB_WC_LOC_PROT_ERR;
3054                         local_ops = 1;
3055                 }
3056                 goto send_comp;
3057 
3058         case IB_WR_SEND_WITH_INV:
3059         case IB_WR_SEND_WITH_IMM:
3060         case IB_WR_SEND:
3061                 ret = rvt_get_rwqe(qp, false);
3062                 if (ret < 0)
3063                         goto op_err;
3064                 if (!ret)
3065                         goto rnr_nak;
3066                 if (wqe->length > qp->r_len)
3067                         goto inv_err;
3068                 switch (wqe->wr.opcode) {
3069                 case IB_WR_SEND_WITH_INV:
3070                         if (!rvt_invalidate_rkey(qp,
3071                                                  wqe->wr.ex.invalidate_rkey)) {
3072                                 wc.wc_flags = IB_WC_WITH_INVALIDATE;
3073                                 wc.ex.invalidate_rkey =
3074                                         wqe->wr.ex.invalidate_rkey;
3075                         }
3076                         break;
3077                 case IB_WR_SEND_WITH_IMM:
3078                         wc.wc_flags = IB_WC_WITH_IMM;
3079                         wc.ex.imm_data = wqe->wr.ex.imm_data;
3080                         break;
3081                 default:
3082                         break;
3083                 }
3084                 break;
3085 
3086         case IB_WR_RDMA_WRITE_WITH_IMM:
3087                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3088                         goto inv_err;
3089                 wc.wc_flags = IB_WC_WITH_IMM;
3090                 wc.ex.imm_data = wqe->wr.ex.imm_data;
3091                 ret = rvt_get_rwqe(qp, true);
3092                 if (ret < 0)
3093                         goto op_err;
3094                 if (!ret)
3095                         goto rnr_nak;
3096                 /* skip the copy_last assignment and qp_access_flags recheck */
3097                 goto do_write;
3098         case IB_WR_RDMA_WRITE:
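                /*
                 * For user QPs, the final 8 bytes are copied separately
                 * (see rvt_copy_sge()) so a consumer polling the tail
                 * of the buffer observes them only after the rest of
                 * the payload has landed.
                 */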
3099                 copy_last = rvt_is_user_qp(qp);
3100                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3101                         goto inv_err;
3102 do_write:
3103                 if (wqe->length == 0)
3104                         break;
3105                 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3106                                           wqe->rdma_wr.remote_addr,
3107                                           wqe->rdma_wr.rkey,
3108                                           IB_ACCESS_REMOTE_WRITE)))
3109                         goto acc_err;
3110                 qp->r_sge.sg_list = NULL;
3111                 qp->r_sge.num_sge = 1;
3112                 qp->r_sge.total_len = wqe->length;
3113                 break;
3114 
3115         case IB_WR_RDMA_READ:
3116                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3117                         goto inv_err;
3118                 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3119                                           wqe->rdma_wr.remote_addr,
3120                                           wqe->rdma_wr.rkey,
3121                                           IB_ACCESS_REMOTE_READ)))
3122                         goto acc_err;
3123                 release = false;
3124                 sqp->s_sge.sg_list = NULL;
3125                 sqp->s_sge.num_sge = 1;
3126                 qp->r_sge.sge = wqe->sg_list[0];
3127                 qp->r_sge.sg_list = wqe->sg_list + 1;
3128                 qp->r_sge.num_sge = wqe->wr.num_sge;
3129                 qp->r_sge.total_len = wqe->length;
3130                 break;
3131 
3132         case IB_WR_ATOMIC_CMP_AND_SWP:
3133         case IB_WR_ATOMIC_FETCH_AND_ADD:
3134                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3135                         goto inv_err;
3136                 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3137                                           wqe->atomic_wr.remote_addr,
3138                                           wqe->atomic_wr.rkey,
3139                                           IB_ACCESS_REMOTE_ATOMIC)))
3140                         goto acc_err;
3141                 /* Perform atomic OP and save result. */
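                /*
                 * atomic64_add_return() yields the post-add value, so
                 * sdata is subtracted back out to report the original
                 * contents; cmpxchg() already returns the prior value.
                 */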
3142                 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3143                 sdata = wqe->atomic_wr.compare_add;
3144                 *(u64 *)sqp->s_sge.sge.vaddr =
3145                         (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3146                         (u64)atomic64_add_return(sdata, maddr) - sdata :
3147                         (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3148                                       sdata, wqe->atomic_wr.swap);
3149                 rvt_put_mr(qp->r_sge.sge.mr);
3150                 qp->r_sge.num_sge = 0;
3151                 goto send_comp;
3152 
3153         default:
3154                 send_status = IB_WC_LOC_QP_OP_ERR;
3155                 goto serr;
3156         }
3157 
3158         sge = &sqp->s_sge.sge;
3159         while (sqp->s_len) {
3160                 u32 len = rvt_get_sge_length(sge, sqp->s_len);
3161 
3162                 WARN_ON_ONCE(len == 0);
3163                 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3164                              len, release, copy_last);
3165                 rvt_update_sge(&sqp->s_sge, len, !release);
3166                 sqp->s_len -= len;
3167         }
3168         if (release)
3169                 rvt_put_ss(&qp->r_sge);
3170 
3171         if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3172                 goto send_comp;
3173 
3174         if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3175                 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3176         else
3177                 wc.opcode = IB_WC_RECV;
3178         wc.wr_id = qp->r_wr_id;
3179         wc.status = IB_WC_SUCCESS;
3180         wc.byte_len = wqe->length;
3181         wc.qp = &qp->ibqp;
3182         wc.src_qp = qp->remote_qpn;
3183         wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3184         wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3185         wc.port_num = 1;
3186         /* Signal completion event if the solicited bit is set. */
3187         rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3188 
3189 send_comp:
3190         spin_unlock_irqrestore(&qp->r_lock, flags);
3191         spin_lock_irqsave(&sqp->s_lock, flags);
3192         rvp->n_loop_pkts++;
3193 flush_send:
3194         sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3195         rvt_send_complete(sqp, wqe, send_status);
3196         if (local_ops) {
3197                 atomic_dec(&sqp->local_ops_pending);
3198                 local_ops = 0;
3199         }
3200         goto again;
3201 
3202 rnr_nak:
3203         /* Handle RNR NAK */
3204         if (qp->ibqp.qp_type == IB_QPT_UC)
3205                 goto send_comp;
3206         rvp->n_rnr_naks++;
3207         /*
3208          * Note: we don't need the s_lock held since the BUSY flag
3209          * makes this single threaded.
3210          */
3211         if (sqp->s_rnr_retry == 0) {
3212                 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3213                 goto serr;
3214         }
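        /*
         * An s_rnr_retry_cnt of 7 encodes "retry forever" (IBTA), so
         * only finite retry counts are decremented.
         */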
3215         if (sqp->s_rnr_retry_cnt < 7)
3216                 sqp->s_rnr_retry--;
3217         spin_unlock_irqrestore(&qp->r_lock, flags);
3218         spin_lock_irqsave(&sqp->s_lock, flags);
3219         if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3220                 goto clr_busy;
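        /*
         * r_min_rnr_timer holds the 5-bit timeout code; shift it into
         * the AETH credit-field position that rvt_add_rnr_timer()
         * decodes.
         */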
3221         rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3222                                 IB_AETH_CREDIT_SHIFT);
3223         goto clr_busy;
3224 
3225 op_err:
3226         send_status = IB_WC_REM_OP_ERR;
3227         wc.status = IB_WC_LOC_QP_OP_ERR;
3228         goto err;
3229 
3230 inv_err:
3231         send_status =
3232                 sqp->ibqp.qp_type == IB_QPT_RC ?
3233                         IB_WC_REM_INV_REQ_ERR :
3234                         IB_WC_SUCCESS;
3235         wc.status = IB_WC_LOC_QP_OP_ERR;
3236         goto err;
3237 
3238 acc_err:
3239         send_status = IB_WC_REM_ACCESS_ERR;
3240         wc.status = IB_WC_LOC_PROT_ERR;
3241 err:
3242         /* responder goes to error state */
3243         rvt_rc_error(qp, wc.status);
3244 
3245 serr:
3246         spin_unlock_irqrestore(&qp->r_lock, flags);
3247 serr_no_r_lock:
3248         spin_lock_irqsave(&sqp->s_lock, flags);
3249         rvt_send_complete(sqp, wqe, send_status);
3250         if (sqp->ibqp.qp_type == IB_QPT_RC) {
3251                 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3252 
3253                 sqp->s_flags &= ~RVT_S_BUSY;
3254                 spin_unlock_irqrestore(&sqp->s_lock, flags);
3255                 if (lastwqe) {
3256                         struct ib_event ev;
3257 
3258                         ev.device = sqp->ibqp.device;
3259                         ev.element.qp = &sqp->ibqp;
3260                         ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3261                         sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3262                 }
3263                 goto done;
3264         }
3265 clr_busy:
3266         sqp->s_flags &= ~RVT_S_BUSY;
3267 unlock:
3268         spin_unlock_irqrestore(&sqp->s_lock, flags);
3269 done:
3270         rcu_read_unlock();
3271 }
3272 EXPORT_SYMBOL(rvt_ruc_loopback);
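
/*
 * Usage sketch (editorial, hedged): how a hypothetical driver send
 * engine might route a request through rvt_ruc_loopback() when the
 * destination LID resolves to the local port.  The helper name and
 * loopback test are assumptions; real consumers (e.g. hfi1, qib) have
 * their own checks in their ruc paths.
 */
#if 0
static void example_do_send(struct rvt_qp *qp, u32 local_lid)
{
        if (rdma_ah_get_dlid(&qp->remote_ah_attr) == local_lid) {
                rvt_ruc_loopback(qp);
                return;
        }
        /* ...otherwise build and post a packet to the wire... */
}
#endif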
