root/drivers/net/ethernet/mellanox/mlx5/core/eq.c


DEFINITIONS

This source file includes the following definitions:
  1. mlx5_cmd_destroy_eq
  2. mlx5_eq_cq_get
  3. mlx5_eq_comp_int
  4. mlx5_eq_poll_irq_disabled
  5. mlx5_eq_async_int
  6. init_eq_buf
  7. create_map_eq
  8. mlx5_eq_enable
  9. mlx5_eq_disable
  10. destroy_unmap_eq
  11. mlx5_eq_add_cq
  12. mlx5_eq_del_cq
  13. mlx5_eq_table_init
  14. mlx5_eq_table_cleanup
  15. create_async_eq
  16. destroy_async_eq
  17. cq_err_event_notifier
  18. gather_user_async_events
  19. gather_async_events_mask
  20. create_async_eqs
  21. destroy_async_eqs
  22. mlx5_get_async_eq
  23. mlx5_eq_synchronize_async_irq
  24. mlx5_eq_synchronize_cmd_irq
  25. mlx5_eq_create_generic
  26. mlx5_eq_destroy_generic
  27. mlx5_eq_get_eqe
  28. mlx5_eq_update_ci
  29. destroy_comp_eqs
  30. create_comp_eqs
  31. mlx5_vector2eqn
  32. mlx5_comp_vectors_count
  33. mlx5_comp_irq_get_affinity_mask
  34. mlx5_eq_table_get_rmap
  35. mlx5_eqn2comp_eq
  36. mlx5_core_eq_free_irqs
  37. mlx5_eq_table_create
  38. mlx5_eq_table_destroy
  39. mlx5_eq_notifier_register
  40. mlx5_eq_notifier_unregister

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};
/* The budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we
 * update the ci before we have polled all the entries in the EQ.
 * MLX5_NUM_SPARE_EQE is added on top of the requested size when the EQ is
 * created, so the budget is always smaller than the EQ size.
 */
enum {
        MLX5_EQ_POLLING_BUDGET  = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
struct mlx5_eq_table {
        struct list_head        comp_eqs_list;
        struct mlx5_eq_async    pages_eq;
        struct mlx5_eq_async    cmd_eq;
        struct mlx5_eq_async    async_eq;

        struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

        /* Notifier for CQ error events, which are delivered on the async EQ */
        struct mlx5_nb          cq_err_nb;

        struct mutex            lock; /* serialize async EQ creation/destruction */
        int                     num_comp_eqs;
        struct mlx5_irq_table   *irq_table;
};

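/* The fixed set of async events that is always enabled on the async EQ;
 * gather_async_events_mask() extends it at runtime with events that depend
 * on device capabilities.
 */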
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        rcu_read_lock();
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                mlx5_cq_hold(cq);
        rcu_read_unlock();

        return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
                            __always_unused unsigned long action,
                            __always_unused void *data)
{
        struct mlx5_eq_comp *eq_comp =
                container_of(nb, struct mlx5_eq_comp, irq_nb);
        struct mlx5_eq *eq = &eq_comp->core;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;
        u32 cqn = -1;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                struct mlx5_core_cq *cq;

                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();
                /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

                cq = mlx5_eq_cq_get(eq, cqn);
                if (likely(cq)) {
                        ++cq->arm_sn;
                        cq->comp(cq, eqe);
                        mlx5_cq_put(cq);
                } else {
                        mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
                }

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq_comp->tasklet_ctx.task);

        return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. Using it is not recommended unless it is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
        u32 count_eqe;

        disable_irq(eq->core.irqn);
        count_eqe = eq->core.cons_index;
        mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
        count_eqe = eq->core.cons_index - count_eqe;
        enable_irq(eq->core.irqn);

        return count_eqe;
}

static int mlx5_eq_async_int(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct mlx5_eq_async *eq_async =
                container_of(nb, struct mlx5_eq_async, irq_nb);
        struct mlx5_eq *eq = &eq_async->core;
        struct mlx5_eq_table *eqt;
        struct mlx5_core_dev *dev;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;

        dev = eq->dev;
        eqt = dev->priv.eq_table;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        return 0;
}

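/* Mark every entry as HW-owned for the first pass: with the owner bit set to
 * MLX5_EQE_OWNER_INIT_VAL and a consumer index of 0, next_eqe_sw() will not
 * consider an entry valid until the HW has written it.
 */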
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
              struct mlx5_eq_param *param)
{
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        u8 vecidx = param->irq_index;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;
        int i;

        /* Init CQ table */
        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
                MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

        for (i = 0; i < 4; i++)
                MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
                                 param->mask[i]);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->vecidx = vecidx;
        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_eq;

        kvfree(in);
        return 0;

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to enable
 * @nb: Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * Return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                   struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
        if (!err)
                eq_update_ci(eq, 1);

        return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to disable
 * @nb: Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                     struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;

        mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
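
/* A minimal usage sketch for the enable/disable pair. Illustrative only:
 * "my_eq", "my_nb" and my_handler() are hypothetical consumer objects, not
 * names from this file. The notifier is attached to the EQ's IRQ vector on
 * enable and must be detached before the EQ is destroyed:
 *
 *      my_nb.notifier_call = my_handler;
 *      err = mlx5_eq_enable(dev, my_eq, &my_nb);    (after the EQ is created)
 *      ...
 *      mlx5_eq_disable(dev, my_eq, &my_nb);         (before the EQ is destroyed)
 */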

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);

        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);

        mlx5_buf_free(dev, &eq->buf);

        return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock(&table->lock);

        if (!tmp) {
                mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
                              cq->cqn, eq->eqn);
                return;
        }

        if (tmp != cq)
                mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
                              cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table;
        int i;

        eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
        if (!eq_table)
                return -ENOMEM;

        dev->priv.eq_table = eq_table;

        mlx5_eq_debugfs_init(dev);

        mutex_init(&eq_table->lock);
        for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

        eq_table->irq_table = dev->priv.irq_table;
        return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
        kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        /* Async EQs must share irq index 0 */
        if (param->irq_index != 0) {
                err = -EINVAL;
                goto unlock;
        }

        err = create_map_eq(dev, eq, param);
unlock:
        mutex_unlock(&eq_table->lock);
        return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        err = destroy_unmap_eq(dev, eq);
        mutex_unlock(&eq_table->lock);
        return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{
        struct mlx5_eq_table *eqt;
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;
        struct mlx5_eq *eq;
        u32 cqn;

        /* type == MLX5_EVENT_TYPE_CQ_ERROR */

        eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
        eq  = &eqt->async_eq.core;
        eqe = data;

        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                       cqn, eqe->data.cq_err.syndrome);

        cq = mlx5_eq_cq_get(eq, cqn);
        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
                return NOTIFY_OK;
        }

        if (cq->event)
                cq->event(cq, type);

        mlx5_cq_put(cq);

        return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
        __be64 *user_unaffiliated_events;
        __be64 *user_affiliated_events;
        int i;

        user_affiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
        user_unaffiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

        for (i = 0; i < 4; i++)
                mask[i] |= be64_to_cpu(user_affiliated_events[i] |
                                       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");

        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

        if (mlx5_eswitch_is_funcs_handler(dev))
                async_event_mask |=
                        (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

        mask[0] = async_event_mask;

        if (MLX5_CAP_GEN(dev, event_cap))
                gather_user_async_events(dev, mask);
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_param param = {};
        int err;

        MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);

        table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_CMD_EQE,
        };

        param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
        err = create_async_eq(dev, &table->cmd_eq.core, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                goto err0;
        }
        err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
        if (err) {
                mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
                goto err1;
        }
        mlx5_cmd_use_events(dev);

        table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_ASYNC_EQE,
        };

        gather_async_events_mask(dev, param.mask);
        err = create_async_eq(dev, &table->async_eq.core, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err2;
        }
        err = mlx5_eq_enable(dev, &table->async_eq.core,
                             &table->async_eq.irq_nb);
        if (err) {
                mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
                goto err3;
        }

        table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = /* TODO: sriov max_vf + */ 1,
        };

        param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
        err = create_async_eq(dev, &table->pages_eq.core, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err4;
        }
        err = mlx5_eq_enable(dev, &table->pages_eq.core,
                             &table->pages_eq.irq_nb);
        if (err) {
                mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
                goto err5;
        }

        return err;

err5:
        destroy_async_eq(dev, &table->pages_eq.core);
err4:
        mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err3:
        destroy_async_eq(dev, &table->async_eq.core);
err2:
        mlx5_cmd_use_polling(dev);
        mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
err1:
        destroy_async_eq(dev, &table->cmd_eq.core);
err0:
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
        return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        int err;

        mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
        err = destroy_async_eq(dev, &table->pages_eq.core);
        if (err)
                mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
                              err);

        mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
        err = destroy_async_eq(dev, &table->async_eq.core);
        if (err)
                mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
                              err);

        mlx5_cmd_use_polling(dev);

        mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
        err = destroy_async_eq(dev, &table->cmd_eq.core);
        if (err)
                mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
                              err);

        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
        return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
                       struct mlx5_eq_param *param)
{
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;

        if (!eq)
                return ERR_PTR(-ENOMEM);

        err = create_async_eq(dev, eq, param);
        if (err) {
                kvfree(eq);
                eq = ERR_PTR(err);
        }

        return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (IS_ERR(eq))
                return -EINVAL;

        err = destroy_async_eq(dev, eq);
        if (err)
                goto out;

        kvfree(eq);
out:
        return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
        u32 ci = eq->cons_index + cc;
        struct mlx5_eqe *eqe;

        eqe = get_eqe(eq, ci & (eq->nent - 1));
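        /* The EQE is valid (SW-owned) when its ownership bit matches the
         * parity of the current pass over the queue: HW flips the bit it
         * writes on each wrap-around of the power-of-two sized buffer.
         */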
        eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
        /* Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        if (eqe)
                dma_rmb();

        return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val;

        eq->cons_index += cc;
        val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);

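/* A sketch of how a generic-EQ consumer (e.g. the RDMA ODP EQ mentioned
 * above) might drive the two polling helpers from its notifier; illustrative
 * only, assuming "eq" was obtained from mlx5_eq_create_generic(). "cc" counts
 * the EQEs consumed in this run before the CI is pushed and the EQ re-armed:
 *
 *      struct mlx5_eqe *eqe;
 *      u32 cc = 0;
 *
 *      while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *              ... handle eqe ...
 *              cc++;
 *      }
 *      mlx5_eq_update_ci(eq, cc, true);
 */
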
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
                if (destroy_unmap_eq(dev, &eq->core))
                        mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                                       eq->core.eqn);
                tasklet_disable(&eq->tasklet_ctx.task);
                kfree(eq);
        }
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;
        int ncomp_eqs;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_eqs = table->num_comp_eqs;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_eqs; i++) {
                int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};

                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);

                eq->irq_nb.notifier_call = mlx5_eq_comp_int;
                param = (struct mlx5_eq_param) {
                        .irq_index = vecidx,
                        .nent = nent,
                };
                err = create_map_eq(dev, &eq->core, &param);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
                if (err) {
                        destroy_unmap_eq(dev, &eq->core);
                        kfree(eq);
                        goto clean;
                }

                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
                list_add_tail(&eq->list, &table->comp_eqs_list);
        }

        return 0;

clean:
        destroy_comp_eqs(dev);
        return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
        int err = -ENOENT;
        int i = 0;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
                        *eqn = eq->core.eqn;
                        *irqn = eq->core.irqn;
                        err = 0;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
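
/* Consumers typically use this to resolve the EQN for a completion vector
 * before binding a CQ to it. A hypothetical sketch (the CQC field name is an
 * assumption taken from mlx5_ifc, not from this file):
 *
 *      int eqn;
 *      unsigned int irqn;
 *
 *      err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *      if (err)
 *              return err;
 *      MLX5_SET(cqc, cqc, c_eqn, eqn);
 */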

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
        return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
        int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

        return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
                                          vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
        return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;

        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->core.eqn == eqn)
                        return eq;
        }

        return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;

        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
        mlx5_irq_table_destroy(dev);
        mutex_unlock(&table->lock);
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        eq_table->num_comp_eqs =
                mlx5_irq_get_num_comp(eq_table->irq_table);

        err = create_async_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create async EQs\n");
                goto err_async_eqs;
        }

        err = create_comp_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create completion EQs\n");
                goto err_comp_eqs;
        }

        return 0;
err_comp_eqs:
        destroy_async_eqs(dev);
err_async_eqs:
        return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
        destroy_comp_eqs(dev);
        destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
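
/* A registration sketch, mirroring how this file wires up cq_err_nb in
 * create_async_eqs(); "my_nb" and my_event_handler() are hypothetical:
 *
 *      MLX5_NB_INIT(&my_nb, my_event_handler, CQ_ERROR);
 *      mlx5_eq_notifier_register(dev, &my_nb);
 *      ...
 *      mlx5_eq_notifier_unregister(dev, &my_nb);
 */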
