root/drivers/net/ethernet/amazon/ena/ena_com.c

DEFINITIONS

This source file includes the following definitions.
  1. ena_com_mem_addr_set
  2. ena_com_admin_init_sq
  3. ena_com_admin_init_cq
  4. ena_com_admin_init_aenq
  5. comp_ctxt_release
  6. get_comp_ctxt
  7. __ena_com_submit_admin_cmd
  8. ena_com_init_comp_ctxt
  9. ena_com_submit_admin_cmd
  10. ena_com_init_io_sq
  11. ena_com_init_io_cq
  12. ena_com_handle_single_admin_completion
  13. ena_com_handle_admin_completion
  14. ena_com_comp_status_to_errno
  15. ena_com_wait_and_process_admin_cq_polling
  16. ena_com_set_llq
  17. ena_com_config_llq_info
  18. ena_com_wait_and_process_admin_cq_interrupts
  19. ena_com_reg_bar_read32
  20. ena_com_wait_and_process_admin_cq
  21. ena_com_destroy_io_sq
  22. ena_com_io_queue_free
  23. wait_for_reset_state
  24. ena_com_check_supported_feature_id
  25. ena_com_get_feature_ex
  26. ena_com_get_feature
  27. ena_com_hash_key_fill_default_key
  28. ena_com_get_current_hash_function
  29. ena_com_hash_key_allocate
  30. ena_com_hash_key_destroy
  31. ena_com_hash_ctrl_init
  32. ena_com_hash_ctrl_destroy
  33. ena_com_indirect_table_allocate
  34. ena_com_indirect_table_destroy
  35. ena_com_create_io_sq
  36. ena_com_ind_tbl_convert_to_device
  37. ena_com_update_intr_delay_resolution
  38. ena_com_execute_admin_command
  39. ena_com_create_io_cq
  40. ena_com_get_io_handlers
  41. ena_com_abort_admin_commands
  42. ena_com_wait_for_abort_completion
  43. ena_com_destroy_io_cq
  44. ena_com_get_admin_running_state
  45. ena_com_set_admin_running_state
  46. ena_com_admin_aenq_enable
  47. ena_com_set_aenq_config
  48. ena_com_get_dma_width
  49. ena_com_validate_version
  50. ena_com_admin_destroy
  51. ena_com_set_admin_polling_mode
  52. ena_com_set_admin_auto_polling_mode
  53. ena_com_mmio_reg_read_request_init
  54. ena_com_set_mmio_read_mode
  55. ena_com_mmio_reg_read_request_destroy
  56. ena_com_mmio_reg_read_request_write_dev_addr
  57. ena_com_admin_init
  58. ena_com_create_io_queue
  59. ena_com_destroy_io_queue
  60. ena_com_get_link_params
  61. ena_com_get_dev_attr_feat
  62. ena_com_admin_q_comp_intr_handler
  63. ena_com_get_specific_aenq_cb
  64. ena_com_aenq_intr_handler
  65. ena_com_dev_reset
  66. ena_get_dev_stats
  67. ena_com_get_dev_basic_stats
  68. ena_com_set_dev_mtu
  69. ena_com_get_offload_settings
  70. ena_com_set_hash_function
  71. ena_com_fill_hash_function
  72. ena_com_get_hash_function
  73. ena_com_get_hash_ctrl
  74. ena_com_set_hash_ctrl
  75. ena_com_set_default_hash_ctrl
  76. ena_com_fill_hash_ctrl
  77. ena_com_indirect_table_fill_entry
  78. ena_com_indirect_table_set
  79. ena_com_indirect_table_get
  80. ena_com_rss_init
  81. ena_com_rss_destroy
  82. ena_com_allocate_host_info
  83. ena_com_allocate_debug_area
  84. ena_com_delete_host_info
  85. ena_com_delete_debug_area
  86. ena_com_set_host_attributes
  87. ena_com_interrupt_moderation_supported
  88. ena_com_update_nonadaptive_moderation_interval
  89. ena_com_update_nonadaptive_moderation_interval_tx
  90. ena_com_update_nonadaptive_moderation_interval_rx
  91. ena_com_init_interrupt_moderation
  92. ena_com_get_nonadaptive_moderation_interval_tx
  93. ena_com_get_nonadaptive_moderation_interval_rx
  94. ena_com_config_dev_mode

   1 /*
   2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  */
  32 
  33 #include "ena_com.h"
  34 
  35 /*****************************************************************************/
  36 /*****************************************************************************/
  37 
  38 /* Timeout in micro-sec */
  39 #define ADMIN_CMD_TIMEOUT_US (3000000)
  40 
  41 #define ENA_ASYNC_QUEUE_DEPTH 16
  42 #define ENA_ADMIN_QUEUE_DEPTH 32
  43 
  44 
  45 #define ENA_CTRL_MAJOR          0
  46 #define ENA_CTRL_MINOR          0
  47 #define ENA_CTRL_SUB_MINOR      1
  48 
  49 #define MIN_ENA_CTRL_VER \
  50         (((ENA_CTRL_MAJOR) << \
  51         (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  52         ((ENA_CTRL_MINOR) << \
  53         (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  54         (ENA_CTRL_SUB_MINOR))
  55 
  56 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
  57 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))
  58 
  59 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  60 
  61 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
  62 
  63 #define ENA_REGS_ADMIN_INTR_MASK 1
  64 
  65 #define ENA_POLL_MS     5
  66 
  67 /*****************************************************************************/
  68 /*****************************************************************************/
  69 /*****************************************************************************/
  70 
  71 enum ena_cmd_status {
  72         ENA_CMD_SUBMITTED,
  73         ENA_CMD_COMPLETED,
  74         /* Abort - canceled by the driver */
  75         ENA_CMD_ABORTED,
  76 };
  77 
  78 struct ena_comp_ctx {
  79         struct completion wait_event;
  80         struct ena_admin_acq_entry *user_cqe;
  81         u32 comp_size;
  82         enum ena_cmd_status status;
  83         /* status from the device */
  84         u8 comp_status;
  85         u8 cmd_opcode;
  86         bool occupied;
  87 };
  88 
  89 struct ena_com_stats_ctx {
  90         struct ena_admin_aq_get_stats_cmd get_cmd;
  91         struct ena_admin_acq_get_stats_resp get_resp;
  92 };
  93 
  94 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  95                                        struct ena_common_mem_addr *ena_addr,
  96                                        dma_addr_t addr)
  97 {
  98         if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
   99                 pr_err("dma address has more bits than the device supports\n");
 100                 return -EINVAL;
 101         }
 102 
 103         ena_addr->mem_addr_low = lower_32_bits(addr);
 104         ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
 105 
 106         return 0;
 107 }
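
      /*
       * Example: with dma_addr_bits == 48, an address such as
       * 0x0000123456789abcULL passes the GENMASK_ULL() check above;
       * lower_32_bits() yields mem_addr_low == 0x56789abc and
       * upper_32_bits() yields mem_addr_high == 0x1234. Any address with a
       * bit set at or above bit 48 fails the check and -EINVAL is returned.
       */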
 108 
 109 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
 110 {
 111         struct ena_com_admin_sq *sq = &queue->sq;
 112         u16 size = ADMIN_SQ_SIZE(queue->q_depth);
 113 
 114         sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
 115                                          GFP_KERNEL);
 116 
 117         if (!sq->entries) {
 118                 pr_err("memory allocation failed\n");
 119                 return -ENOMEM;
 120         }
 121 
 122         sq->head = 0;
 123         sq->tail = 0;
 124         sq->phase = 1;
 125 
 126         sq->db_addr = NULL;
 127 
 128         return 0;
 129 }
 130 
 131 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
 132 {
 133         struct ena_com_admin_cq *cq = &queue->cq;
 134         u16 size = ADMIN_CQ_SIZE(queue->q_depth);
 135 
 136         cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
 137                                          GFP_KERNEL);
 138 
 139         if (!cq->entries) {
 140                 pr_err("memory allocation failed\n");
 141                 return -ENOMEM;
 142         }
 143 
 144         cq->head = 0;
 145         cq->phase = 1;
 146 
 147         return 0;
 148 }
 149 
 150 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 151                                    struct ena_aenq_handlers *aenq_handlers)
 152 {
 153         struct ena_com_aenq *aenq = &dev->aenq;
 154         u32 addr_low, addr_high, aenq_caps;
 155         u16 size;
 156 
 157         dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 158         size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
 159         aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
 160                                            GFP_KERNEL);
 161 
 162         if (!aenq->entries) {
 163                 pr_err("memory allocation failed\n");
 164                 return -ENOMEM;
 165         }
 166 
 167         aenq->head = aenq->q_depth;
 168         aenq->phase = 1;
 169 
 170         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 171         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 172 
 173         writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 174         writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 175 
 176         aenq_caps = 0;
 177         aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
 178         aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
 179                       << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
 180                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
 181         writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 182 
 183         if (unlikely(!aenq_handlers)) {
 184                 pr_err("aenq handlers pointer is NULL\n");
 185                 return -EINVAL;
 186         }
 187 
 188         aenq->aenq_handlers = aenq_handlers;
 189 
 190         return 0;
 191 }
 192 
 193 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 194                                      struct ena_comp_ctx *comp_ctx)
 195 {
 196         comp_ctx->occupied = false;
 197         atomic_dec(&queue->outstanding_cmds);
 198 }
 199 
 200 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
 201                                           u16 command_id, bool capture)
 202 {
 203         if (unlikely(!queue->comp_ctx)) {
 204                 pr_err("Completion context is NULL\n");
 205                 return NULL;
 206         }
 207 
 208         if (unlikely(command_id >= queue->q_depth)) {
 209                 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
 210                        command_id, queue->q_depth);
 211                 return NULL;
 212         }
 213 
 214         if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
 215                 pr_err("Completion context is occupied\n");
 216                 return NULL;
 217         }
 218 
 219         if (capture) {
 220                 atomic_inc(&queue->outstanding_cmds);
 221                 queue->comp_ctx[command_id].occupied = true;
 222         }
 223 
 224         return &queue->comp_ctx[command_id];
 225 }
 226 
 227 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 228                                                        struct ena_admin_aq_entry *cmd,
 229                                                        size_t cmd_size_in_bytes,
 230                                                        struct ena_admin_acq_entry *comp,
 231                                                        size_t comp_size_in_bytes)
 232 {
 233         struct ena_comp_ctx *comp_ctx;
 234         u16 tail_masked, cmd_id;
 235         u16 queue_size_mask;
 236         u16 cnt;
 237 
 238         queue_size_mask = admin_queue->q_depth - 1;
 239 
 240         tail_masked = admin_queue->sq.tail & queue_size_mask;
 241 
 242         /* In case of queue FULL */
 243         cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
 244         if (cnt >= admin_queue->q_depth) {
 245                 pr_debug("admin queue is full.\n");
 246                 admin_queue->stats.out_of_space++;
 247                 return ERR_PTR(-ENOSPC);
 248         }
 249 
 250         cmd_id = admin_queue->curr_cmd_id;
 251 
 252         cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
 253                 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 254 
 255         cmd->aq_common_descriptor.command_id |= cmd_id &
 256                 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 257 
 258         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
 259         if (unlikely(!comp_ctx))
 260                 return ERR_PTR(-EINVAL);
 261 
 262         comp_ctx->status = ENA_CMD_SUBMITTED;
 263         comp_ctx->comp_size = (u32)comp_size_in_bytes;
 264         comp_ctx->user_cqe = comp;
 265         comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
 266 
 267         reinit_completion(&comp_ctx->wait_event);
 268 
 269         memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
 270 
 271         admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
 272                 queue_size_mask;
 273 
 274         admin_queue->sq.tail++;
 275         admin_queue->stats.submitted_cmd++;
 276 
 277         if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
 278                 admin_queue->sq.phase = !admin_queue->sq.phase;
 279 
 280         writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
 281 
 282         return comp_ctx;
 283 }
 284 
 285 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
 286 {
 287         size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
 288         struct ena_comp_ctx *comp_ctx;
 289         u16 i;
 290 
 291         queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
 292         if (unlikely(!queue->comp_ctx)) {
 293                 pr_err("memory allocation failed\n");
 294                 return -ENOMEM;
 295         }
 296 
 297         for (i = 0; i < queue->q_depth; i++) {
 298                 comp_ctx = get_comp_ctxt(queue, i, false);
 299                 if (comp_ctx)
 300                         init_completion(&comp_ctx->wait_event);
 301         }
 302 
 303         return 0;
 304 }
 305 
 306 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 307                                                      struct ena_admin_aq_entry *cmd,
 308                                                      size_t cmd_size_in_bytes,
 309                                                      struct ena_admin_acq_entry *comp,
 310                                                      size_t comp_size_in_bytes)
 311 {
 312         unsigned long flags = 0;
 313         struct ena_comp_ctx *comp_ctx;
 314 
 315         spin_lock_irqsave(&admin_queue->q_lock, flags);
 316         if (unlikely(!admin_queue->running_state)) {
 317                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 318                 return ERR_PTR(-ENODEV);
 319         }
 320         comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
 321                                               cmd_size_in_bytes,
 322                                               comp,
 323                                               comp_size_in_bytes);
 324         if (IS_ERR(comp_ctx))
 325                 admin_queue->running_state = false;
 326         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 327 
 328         return comp_ctx;
 329 }
 330 
 331 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 332                               struct ena_com_create_io_ctx *ctx,
 333                               struct ena_com_io_sq *io_sq)
 334 {
 335         size_t size;
 336         int dev_node = 0;
 337 
 338         memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 339 
 340         io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 341         io_sq->desc_entry_size =
 342                 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 343                 sizeof(struct ena_eth_io_tx_desc) :
 344                 sizeof(struct ena_eth_io_rx_desc);
 345 
 346         size = io_sq->desc_entry_size * io_sq->q_depth;
 347 
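              /* For both placement policies below, try to allocate on the
               * queue's NUMA node first (ctx->numa_node); if that fails,
               * retry with the device's default node rather than failing
               * the queue initialization.
               */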
 348         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
 349                 dev_node = dev_to_node(ena_dev->dmadev);
 350                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
 351                 io_sq->desc_addr.virt_addr =
 352                         dma_alloc_coherent(ena_dev->dmadev, size,
 353                                            &io_sq->desc_addr.phys_addr,
 354                                            GFP_KERNEL);
 355                 set_dev_node(ena_dev->dmadev, dev_node);
 356                 if (!io_sq->desc_addr.virt_addr) {
 357                         io_sq->desc_addr.virt_addr =
 358                                 dma_alloc_coherent(ena_dev->dmadev, size,
 359                                                    &io_sq->desc_addr.phys_addr,
 360                                                    GFP_KERNEL);
 361                 }
 362 
 363                 if (!io_sq->desc_addr.virt_addr) {
 364                         pr_err("memory allocation failed\n");
 365                         return -ENOMEM;
 366                 }
 367         }
 368 
 369         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 370                 /* Allocate bounce buffers */
 371                 io_sq->bounce_buf_ctrl.buffer_size =
 372                         ena_dev->llq_info.desc_list_entry_size;
 373                 io_sq->bounce_buf_ctrl.buffers_num =
 374                         ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 375                 io_sq->bounce_buf_ctrl.next_to_use = 0;
 376 
 377                 size = io_sq->bounce_buf_ctrl.buffer_size *
 378                          io_sq->bounce_buf_ctrl.buffers_num;
 379 
 380                 dev_node = dev_to_node(ena_dev->dmadev);
 381                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
 382                 io_sq->bounce_buf_ctrl.base_buffer =
 383                         devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 384                 set_dev_node(ena_dev->dmadev, dev_node);
 385                 if (!io_sq->bounce_buf_ctrl.base_buffer)
 386                         io_sq->bounce_buf_ctrl.base_buffer =
 387                                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 388 
 389                 if (!io_sq->bounce_buf_ctrl.base_buffer) {
 390                         pr_err("bounce buffer memory allocation failed\n");
 391                         return -ENOMEM;
 392                 }
 393 
 394                 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
 395                        sizeof(io_sq->llq_info));
 396 
  397                 /* Initialize the first bounce buffer */
 398                 io_sq->llq_buf_ctrl.curr_bounce_buf =
 399                         ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
 400                 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
 401                        0x0, io_sq->llq_info.desc_list_entry_size);
 402                 io_sq->llq_buf_ctrl.descs_left_in_line =
 403                         io_sq->llq_info.descs_num_before_header;
 404 
 405                 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 406                         io_sq->entries_in_tx_burst_left =
 407                                 io_sq->llq_info.max_entries_in_tx_burst;
 408         }
 409 
 410         io_sq->tail = 0;
 411         io_sq->next_to_comp = 0;
 412         io_sq->phase = 1;
 413 
 414         return 0;
 415 }
 416 
 417 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 418                               struct ena_com_create_io_ctx *ctx,
 419                               struct ena_com_io_cq *io_cq)
 420 {
 421         size_t size;
 422         int prev_node = 0;
 423 
 424         memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
 425 
 426         /* Use the basic completion descriptor for Rx */
 427         io_cq->cdesc_entry_size_in_bytes =
 428                 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 429                 sizeof(struct ena_eth_io_tx_cdesc) :
 430                 sizeof(struct ena_eth_io_rx_cdesc_base);
 431 
 432         size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 433 
 434         prev_node = dev_to_node(ena_dev->dmadev);
 435         set_dev_node(ena_dev->dmadev, ctx->numa_node);
 436         io_cq->cdesc_addr.virt_addr =
 437                 dma_alloc_coherent(ena_dev->dmadev, size,
 438                                    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 439         set_dev_node(ena_dev->dmadev, prev_node);
 440         if (!io_cq->cdesc_addr.virt_addr) {
 441                 io_cq->cdesc_addr.virt_addr =
 442                         dma_alloc_coherent(ena_dev->dmadev, size,
 443                                            &io_cq->cdesc_addr.phys_addr,
 444                                            GFP_KERNEL);
 445         }
 446 
 447         if (!io_cq->cdesc_addr.virt_addr) {
 448                 pr_err("memory allocation failed\n");
 449                 return -ENOMEM;
 450         }
 451 
 452         io_cq->phase = 1;
 453         io_cq->head = 0;
 454 
 455         return 0;
 456 }
 457 
 458 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
 459                                                    struct ena_admin_acq_entry *cqe)
 460 {
 461         struct ena_comp_ctx *comp_ctx;
 462         u16 cmd_id;
 463 
 464         cmd_id = cqe->acq_common_descriptor.command &
 465                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 466 
 467         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
 468         if (unlikely(!comp_ctx)) {
 469                 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
 470                 admin_queue->running_state = false;
 471                 return;
 472         }
 473 
 474         comp_ctx->status = ENA_CMD_COMPLETED;
 475         comp_ctx->comp_status = cqe->acq_common_descriptor.status;
 476 
 477         if (comp_ctx->user_cqe)
 478                 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
 479 
 480         if (!admin_queue->polling)
 481                 complete(&comp_ctx->wait_event);
 482 }
 483 
 484 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
 485 {
 486         struct ena_admin_acq_entry *cqe = NULL;
 487         u16 comp_num = 0;
 488         u16 head_masked;
 489         u8 phase;
 490 
 491         head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
 492         phase = admin_queue->cq.phase;
 493 
 494         cqe = &admin_queue->cq.entries[head_masked];
 495 
 496         /* Go over all the completions */
 497         while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
 498                 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 499                 /* Do not read the rest of the completion entry before the
 500                  * phase bit was validated
 501                  */
 502                 dma_rmb();
 503                 ena_com_handle_single_admin_completion(admin_queue, cqe);
 504 
 505                 head_masked++;
 506                 comp_num++;
 507                 if (unlikely(head_masked == admin_queue->q_depth)) {
 508                         head_masked = 0;
 509                         phase = !phase;
 510                 }
 511 
 512                 cqe = &admin_queue->cq.entries[head_masked];
 513         }
 514 
 515         admin_queue->cq.head += comp_num;
 516         admin_queue->cq.phase = phase;
 517         admin_queue->sq.head += comp_num;
 518         admin_queue->stats.completed_cmd += comp_num;
 519 }
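
      /*
       * Example of the phase mechanism above: with q_depth == 32 the consumer
       * starts with phase == 1 and accepts entries 0..31 whose phase bit is 1;
       * after wrapping back to index 0 the expected phase flips to 0, so stale
       * entries left over from the previous pass are not treated as new
       * completions.
       */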
 520 
 521 static int ena_com_comp_status_to_errno(u8 comp_status)
 522 {
 523         if (unlikely(comp_status != 0))
 524                 pr_err("admin command failed[%u]\n", comp_status);
 525 
 526         if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
 527                 return -EINVAL;
 528 
 529         switch (comp_status) {
 530         case ENA_ADMIN_SUCCESS:
 531                 return 0;
 532         case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
 533                 return -ENOMEM;
 534         case ENA_ADMIN_UNSUPPORTED_OPCODE:
 535                 return -EOPNOTSUPP;
 536         case ENA_ADMIN_BAD_OPCODE:
 537         case ENA_ADMIN_MALFORMED_REQUEST:
 538         case ENA_ADMIN_ILLEGAL_PARAMETER:
 539         case ENA_ADMIN_UNKNOWN_ERROR:
 540                 return -EINVAL;
 541         }
 542 
 543         return 0;
 544 }
 545 
 546 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 547                                                      struct ena_com_admin_queue *admin_queue)
 548 {
 549         unsigned long flags = 0;
 550         unsigned long timeout;
 551         int ret;
 552 
 553         timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
 554 
 555         while (1) {
 556                 spin_lock_irqsave(&admin_queue->q_lock, flags);
 557                 ena_com_handle_admin_completion(admin_queue);
 558                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 559 
 560                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
 561                         break;
 562 
 563                 if (time_is_before_jiffies(timeout)) {
 564                         pr_err("Wait for completion (polling) timeout\n");
 565                         /* ENA didn't have any completion */
 566                         spin_lock_irqsave(&admin_queue->q_lock, flags);
 567                         admin_queue->stats.no_completion++;
 568                         admin_queue->running_state = false;
 569                         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 570 
 571                         ret = -ETIME;
 572                         goto err;
 573                 }
 574 
 575                 msleep(ENA_POLL_MS);
 576         }
 577 
 578         if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
 579                 pr_err("Command was aborted\n");
 580                 spin_lock_irqsave(&admin_queue->q_lock, flags);
 581                 admin_queue->stats.aborted_cmd++;
 582                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 583                 ret = -ENODEV;
 584                 goto err;
 585         }
 586 
 587         WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
 588              comp_ctx->status);
 589 
 590         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
 591 err:
 592         comp_ctxt_release(admin_queue, comp_ctx);
 593         return ret;
 594 }
 595 
 596 /**
 597  * Set the LLQ configurations of the firmware
 598  *
 599  * The driver provides only the enabled feature values to the device,
  600  * which, in turn, checks if they are supported.
 601  */
 602 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 603 {
 604         struct ena_com_admin_queue *admin_queue;
 605         struct ena_admin_set_feat_cmd cmd;
 606         struct ena_admin_set_feat_resp resp;
 607         struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 608         int ret;
 609 
 610         memset(&cmd, 0x0, sizeof(cmd));
 611         admin_queue = &ena_dev->admin_queue;
 612 
 613         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 614         cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
 615 
 616         cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
 617         cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
 618         cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 619         cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 620 
 621         ret = ena_com_execute_admin_command(admin_queue,
 622                                             (struct ena_admin_aq_entry *)&cmd,
 623                                             sizeof(cmd),
 624                                             (struct ena_admin_acq_entry *)&resp,
 625                                             sizeof(resp));
 626 
 627         if (unlikely(ret))
 628                 pr_err("Failed to set LLQ configurations: %d\n", ret);
 629 
 630         return ret;
 631 }
 632 
 633 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 634                                    struct ena_admin_feature_llq_desc *llq_features,
 635                                    struct ena_llq_configurations *llq_default_cfg)
 636 {
 637         struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 638         u16 supported_feat;
 639         int rc;
 640 
 641         memset(llq_info, 0, sizeof(*llq_info));
 642 
 643         supported_feat = llq_features->header_location_ctrl_supported;
 644 
 645         if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
 646                 llq_info->header_location_ctrl =
 647                         llq_default_cfg->llq_header_location;
 648         } else {
 649                 pr_err("Invalid header location control, supported: 0x%x\n",
 650                        supported_feat);
 651                 return -EINVAL;
 652         }
 653 
 654         if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
 655                 supported_feat = llq_features->descriptors_stride_ctrl_supported;
 656                 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
 657                         llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
 658                 } else  {
 659                         if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
 660                                 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 661                         } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
 662                                 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
 663                         } else {
 664                                 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
 665                                        supported_feat);
 666                                 return -EINVAL;
 667                         }
 668 
 669                         pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 670                                llq_default_cfg->llq_stride_ctrl, supported_feat,
 671                                llq_info->desc_stride_ctrl);
 672                 }
 673         } else {
 674                 llq_info->desc_stride_ctrl = 0;
 675         }
 676 
 677         supported_feat = llq_features->entry_size_ctrl_supported;
 678         if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
 679                 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
 680                 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
 681         } else {
 682                 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
 683                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 684                         llq_info->desc_list_entry_size = 128;
 685                 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
 686                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
 687                         llq_info->desc_list_entry_size = 192;
 688                 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
 689                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
 690                         llq_info->desc_list_entry_size = 256;
 691                 } else {
 692                         pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
 693                                supported_feat);
 694                         return -EINVAL;
 695                 }
 696 
 697                 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 698                        llq_default_cfg->llq_ring_entry_size, supported_feat,
 699                        llq_info->desc_list_entry_size);
 700         }
 701         if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
  702                 /* The desc list entry size should be a whole multiple of 8.
  703                  * This requirement comes from __iowrite64_copy()
 704                  */
 705                 pr_err("illegal entry size %d\n",
 706                        llq_info->desc_list_entry_size);
 707                 return -EINVAL;
 708         }
 709 
 710         if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 711                 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 712                         sizeof(struct ena_eth_io_tx_desc);
 713         else
 714                 llq_info->descs_per_entry = 1;
 715 
 716         supported_feat = llq_features->desc_num_before_header_supported;
 717         if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
 718                 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
 719         } else {
 720                 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
 721                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 722                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
 723                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
 724                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
 725                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
 726                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
 727                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
 728                 } else {
 729                         pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
 730                                supported_feat);
 731                         return -EINVAL;
 732                 }
 733 
 734                 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 735                        llq_default_cfg->llq_num_decs_before_header,
 736                        supported_feat, llq_info->descs_num_before_header);
 737         }
 738 
 739         llq_info->max_entries_in_tx_burst =
 740                 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
 741 
 742         rc = ena_com_set_llq(ena_dev);
 743         if (rc)
 744                 pr_err("Cannot set LLQ configuration: %d\n", rc);
 745 
 746         return rc;
 747 }
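
      /*
       * Usage sketch (illustrative only; the assumed default values below are
       * examples, not mandated ones). A caller such as the netdev driver fills
       * struct ena_llq_configurations with its preferred defaults and hands it
       * to ena_com_config_llq_info() together with the LLQ feature descriptor
       * reported by the device (called llq_features here):
       *
       *	struct ena_llq_configurations llq_cfg = {
       *		.llq_header_location	    = ENA_ADMIN_INLINE_HEADER,
       *		.llq_stride_ctrl	    = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
       *		.llq_ring_entry_size	    = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
       *		.llq_ring_entry_size_value  = 128,
       *		.llq_num_decs_before_header =
       *			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
       *	};
       *
       *	rc = ena_com_config_llq_info(ena_dev, llq_features, &llq_cfg);
       *
       * For every field the device does not support, ena_com_config_llq_info()
       * falls back to a supported value (or fails with -EINVAL), then pushes
       * the final choice to the device via ena_com_set_llq().
       */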
 748 
 749 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 750                                                         struct ena_com_admin_queue *admin_queue)
 751 {
 752         unsigned long flags = 0;
 753         int ret;
 754 
 755         wait_for_completion_timeout(&comp_ctx->wait_event,
 756                                     usecs_to_jiffies(
 757                                             admin_queue->completion_timeout));
 758 
  759         /* In case the command wasn't completed, find out the root cause.
  760          * There might be two kinds of errors:
  761          * 1) No completion (timeout reached)
  762          * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
  763          */
 764         if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
 765                 spin_lock_irqsave(&admin_queue->q_lock, flags);
 766                 ena_com_handle_admin_completion(admin_queue);
 767                 admin_queue->stats.no_completion++;
 768                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 769 
 770                 if (comp_ctx->status == ENA_CMD_COMPLETED) {
 771                         pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
 772                                comp_ctx->cmd_opcode,
 773                                admin_queue->auto_polling ? "ON" : "OFF");
 774                         /* Check if fallback to polling is enabled */
 775                         if (admin_queue->auto_polling)
 776                                 admin_queue->polling = true;
 777                 } else {
  778                         pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
 779                                comp_ctx->cmd_opcode, comp_ctx->status);
 780                 }
  781                 /* Check if we shifted to polling mode.
  782                  * This happens when there is a completion without an interrupt
  783                  * and autopolling mode is enabled. Continue normal execution in such a case.
  784                  */
 785                 if (!admin_queue->polling) {
 786                         admin_queue->running_state = false;
 787                         ret = -ETIME;
 788                         goto err;
 789                 }
 790         }
 791 
 792         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
 793 err:
 794         comp_ctxt_release(admin_queue, comp_ctx);
 795         return ret;
 796 }
 797 
  798 /* This method reads a hardware device register by posting a write
  799  * and waiting for the response.
  800  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 801  */
 802 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 803 {
 804         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 805         volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 806                 mmio_read->read_resp;
 807         u32 mmio_read_reg, ret, i;
 808         unsigned long flags = 0;
 809         u32 timeout = mmio_read->reg_read_to;
 810 
 811         might_sleep();
 812 
 813         if (timeout == 0)
 814                 timeout = ENA_REG_READ_TIMEOUT;
 815 
 816         /* If readless is disabled, perform regular read */
 817         if (!mmio_read->readless_supported)
 818                 return readl(ena_dev->reg_bar + offset);
 819 
 820         spin_lock_irqsave(&mmio_read->lock, flags);
 821         mmio_read->seq_num++;
 822 
 823         read_resp->req_id = mmio_read->seq_num + 0xDEAD;
 824         mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
 825                         ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
 826         mmio_read_reg |= mmio_read->seq_num &
 827                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 828 
 829         writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 830 
 831         for (i = 0; i < timeout; i++) {
 832                 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
 833                         break;
 834 
 835                 udelay(1);
 836         }
 837 
 838         if (unlikely(i == timeout)) {
  839                 pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
 840                        mmio_read->seq_num, offset, read_resp->req_id,
 841                        read_resp->reg_off);
 842                 ret = ENA_MMIO_READ_TIMEOUT;
 843                 goto err;
 844         }
 845 
 846         if (read_resp->reg_off != offset) {
 847                 pr_err("Read failure: wrong offset provided\n");
 848                 ret = ENA_MMIO_READ_TIMEOUT;
 849         } else {
 850                 ret = read_resp->reg_val;
 851         }
 852 err:
 853         spin_unlock_irqrestore(&mmio_read->lock, flags);
 854 
 855         return ret;
 856 }
 857 
 858 /* There are two types to wait for completion.
 859  * Polling mode - wait until the completion is available.
 860  * Async mode - wait on wait queue until the completion is ready
 861  * (or the timeout expired).
 862  * It is expected that the IRQ called ena_com_handle_admin_completion
 863  * to mark the completions.
 864  */
 865 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
 866                                              struct ena_com_admin_queue *admin_queue)
 867 {
 868         if (admin_queue->polling)
 869                 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
 870                                                                  admin_queue);
 871 
 872         return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
 873                                                             admin_queue);
 874 }
 875 
 876 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 877                                  struct ena_com_io_sq *io_sq)
 878 {
 879         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 880         struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
 881         struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
 882         u8 direction;
 883         int ret;
 884 
 885         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
 886 
 887         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 888                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
 889         else
 890                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
 891 
 892         destroy_cmd.sq.sq_identity |= (direction <<
 893                 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
 894                 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
 895 
 896         destroy_cmd.sq.sq_idx = io_sq->idx;
 897         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
 898 
 899         ret = ena_com_execute_admin_command(admin_queue,
 900                                             (struct ena_admin_aq_entry *)&destroy_cmd,
 901                                             sizeof(destroy_cmd),
 902                                             (struct ena_admin_acq_entry *)&destroy_resp,
 903                                             sizeof(destroy_resp));
 904 
 905         if (unlikely(ret && (ret != -ENODEV)))
 906                 pr_err("failed to destroy io sq error: %d\n", ret);
 907 
 908         return ret;
 909 }
 910 
 911 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 912                                   struct ena_com_io_sq *io_sq,
 913                                   struct ena_com_io_cq *io_cq)
 914 {
 915         size_t size;
 916 
 917         if (io_cq->cdesc_addr.virt_addr) {
 918                 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 919 
 920                 dma_free_coherent(ena_dev->dmadev, size,
 921                                   io_cq->cdesc_addr.virt_addr,
 922                                   io_cq->cdesc_addr.phys_addr);
 923 
 924                 io_cq->cdesc_addr.virt_addr = NULL;
 925         }
 926 
 927         if (io_sq->desc_addr.virt_addr) {
 928                 size = io_sq->desc_entry_size * io_sq->q_depth;
 929 
 930                 dma_free_coherent(ena_dev->dmadev, size,
 931                                   io_sq->desc_addr.virt_addr,
 932                                   io_sq->desc_addr.phys_addr);
 933 
 934                 io_sq->desc_addr.virt_addr = NULL;
 935         }
 936 
 937         if (io_sq->bounce_buf_ctrl.base_buffer) {
 938                 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
 939                 io_sq->bounce_buf_ctrl.base_buffer = NULL;
 940         }
 941 }
 942 
 943 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 944                                 u16 exp_state)
 945 {
 946         u32 val, i;
 947 
 948         /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
 949         timeout = (timeout * 100) / ENA_POLL_MS;
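              /* e.g. a timeout argument of 3 (i.e. 300 ms) becomes
               * (3 * 100) / 5 = 60 iterations of the 5 ms polling loop below.
               */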
 950 
 951         for (i = 0; i < timeout; i++) {
 952                 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 953 
 954                 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
 955                         pr_err("Reg read timeout occurred\n");
 956                         return -ETIME;
 957                 }
 958 
 959                 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
 960                         exp_state)
 961                         return 0;
 962 
 963                 msleep(ENA_POLL_MS);
 964         }
 965 
 966         return -ETIME;
 967 }
 968 
 969 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
 970                                                enum ena_admin_aq_feature_id feature_id)
 971 {
 972         u32 feature_mask = 1 << feature_id;
 973 
  974         /* Device attributes are always supported */
 975         if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
 976             !(ena_dev->supported_features & feature_mask))
 977                 return false;
 978 
 979         return true;
 980 }
 981 
 982 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 983                                   struct ena_admin_get_feat_resp *get_resp,
 984                                   enum ena_admin_aq_feature_id feature_id,
 985                                   dma_addr_t control_buf_dma_addr,
 986                                   u32 control_buff_size,
 987                                   u8 feature_ver)
 988 {
 989         struct ena_com_admin_queue *admin_queue;
 990         struct ena_admin_get_feat_cmd get_cmd;
 991         int ret;
 992 
 993         if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
 994                 pr_debug("Feature %d isn't supported\n", feature_id);
 995                 return -EOPNOTSUPP;
 996         }
 997 
 998         memset(&get_cmd, 0x0, sizeof(get_cmd));
 999         admin_queue = &ena_dev->admin_queue;
1000 
1001         get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1002 
1003         if (control_buff_size)
1004                 get_cmd.aq_common_descriptor.flags =
1005                         ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1006         else
1007                 get_cmd.aq_common_descriptor.flags = 0;
1008 
1009         ret = ena_com_mem_addr_set(ena_dev,
1010                                    &get_cmd.control_buffer.address,
1011                                    control_buf_dma_addr);
1012         if (unlikely(ret)) {
1013                 pr_err("memory address set failed\n");
1014                 return ret;
1015         }
1016 
1017         get_cmd.control_buffer.length = control_buff_size;
1018         get_cmd.feat_common.feature_version = feature_ver;
1019         get_cmd.feat_common.feature_id = feature_id;
1020 
1021         ret = ena_com_execute_admin_command(admin_queue,
1022                                             (struct ena_admin_aq_entry *)
1023                                             &get_cmd,
1024                                             sizeof(get_cmd),
1025                                             (struct ena_admin_acq_entry *)
1026                                             get_resp,
1027                                             sizeof(*get_resp));
1028 
1029         if (unlikely(ret))
1030                 pr_err("Failed to submit get_feature command %d error: %d\n",
1031                        feature_id, ret);
1032 
1033         return ret;
1034 }
1035 
1036 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1037                                struct ena_admin_get_feat_resp *get_resp,
1038                                enum ena_admin_aq_feature_id feature_id,
1039                                u8 feature_ver)
1040 {
1041         return ena_com_get_feature_ex(ena_dev,
1042                                       get_resp,
1043                                       feature_id,
1044                                       0,
1045                                       0,
1046                                       feature_ver);
1047 }
1048 
1049 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1050 {
1051         struct ena_admin_feature_rss_flow_hash_control *hash_key =
1052                 (ena_dev->rss).hash_key;
1053 
1054         netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
 1055         /* The key is stored in the device as an array of u32, and the
 1056          * API requires the key to be passed in the same format. Thus the
 1057          * size of our array in bytes is divided by 4 to get the number of entries.
 1058          */
1059         hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
1060 }
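
      /*
       * For example, if hash_key->key is a 40-byte array (a common RSS key
       * size), keys_num above evaluates to 40 / 4 == 10 u32 words.
       */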
1061 
1062 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1063 {
1064         return ena_dev->rss.hash_func;
1065 }
1066 
1067 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1068 {
1069         struct ena_rss *rss = &ena_dev->rss;
1070         struct ena_admin_feature_rss_flow_hash_control *hash_key;
1071         struct ena_admin_get_feat_resp get_resp;
1072         int rc;
1073 
1074         hash_key = (ena_dev->rss).hash_key;
1075 
1076         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
1077                                     ENA_ADMIN_RSS_HASH_FUNCTION,
1078                                     ena_dev->rss.hash_key_dma_addr,
1079                                     sizeof(ena_dev->rss.hash_key), 0);
1080         if (unlikely(rc)) {
1081                 hash_key = NULL;
1082                 return -EOPNOTSUPP;
1083         }
1084 
1085         rss->hash_key =
1086                 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1087                                    &rss->hash_key_dma_addr, GFP_KERNEL);
1088 
1089         if (unlikely(!rss->hash_key))
1090                 return -ENOMEM;
1091 
1092         return 0;
1093 }
1094 
1095 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1096 {
1097         struct ena_rss *rss = &ena_dev->rss;
1098 
1099         if (rss->hash_key)
1100                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1101                                   rss->hash_key, rss->hash_key_dma_addr);
1102         rss->hash_key = NULL;
1103 }
1104 
1105 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1106 {
1107         struct ena_rss *rss = &ena_dev->rss;
1108 
1109         rss->hash_ctrl =
1110                 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1111                                    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1112 
1113         if (unlikely(!rss->hash_ctrl))
1114                 return -ENOMEM;
1115 
1116         return 0;
1117 }
1118 
1119 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1120 {
1121         struct ena_rss *rss = &ena_dev->rss;
1122 
1123         if (rss->hash_ctrl)
1124                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1125                                   rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1126         rss->hash_ctrl = NULL;
1127 }
1128 
1129 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1130                                            u16 log_size)
1131 {
1132         struct ena_rss *rss = &ena_dev->rss;
1133         struct ena_admin_get_feat_resp get_resp;
1134         size_t tbl_size;
1135         int ret;
1136 
1137         ret = ena_com_get_feature(ena_dev, &get_resp,
1138                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1139         if (unlikely(ret))
1140                 return ret;
1141 
1142         if ((get_resp.u.ind_table.min_size > log_size) ||
1143             (get_resp.u.ind_table.max_size < log_size)) {
 1144                 pr_err("indirect table size doesn't fit. requested size: %d, while min is %d and max is %d\n",
1145                        1 << log_size, 1 << get_resp.u.ind_table.min_size,
1146                        1 << get_resp.u.ind_table.max_size);
1147                 return -EINVAL;
1148         }
1149 
1150         tbl_size = (1ULL << log_size) *
1151                 sizeof(struct ena_admin_rss_ind_table_entry);
1152 
1153         rss->rss_ind_tbl =
1154                 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1155                                    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1156         if (unlikely(!rss->rss_ind_tbl))
1157                 goto mem_err1;
1158 
1159         tbl_size = (1ULL << log_size) * sizeof(u16);
1160         rss->host_rss_ind_tbl =
1161                 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1162         if (unlikely(!rss->host_rss_ind_tbl))
1163                 goto mem_err2;
1164 
1165         rss->tbl_log_size = log_size;
1166 
1167         return 0;
1168 
1169 mem_err2:
1170         tbl_size = (1ULL << log_size) *
1171                 sizeof(struct ena_admin_rss_ind_table_entry);
1172 
1173         dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1174                           rss->rss_ind_tbl_dma_addr);
1175         rss->rss_ind_tbl = NULL;
1176 mem_err1:
1177         rss->tbl_log_size = 0;
1178         return -ENOMEM;
1179 }
1180 
1181 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1182 {
1183         struct ena_rss *rss = &ena_dev->rss;
1184         size_t tbl_size = (1ULL << rss->tbl_log_size) *
1185                 sizeof(struct ena_admin_rss_ind_table_entry);
1186 
1187         if (rss->rss_ind_tbl)
1188                 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1189                                   rss->rss_ind_tbl_dma_addr);
1190         rss->rss_ind_tbl = NULL;
1191 
1192         if (rss->host_rss_ind_tbl)
1193                 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1194         rss->host_rss_ind_tbl = NULL;
1195 }
1196 
1197 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1198                                 struct ena_com_io_sq *io_sq, u16 cq_idx)
1199 {
1200         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1201         struct ena_admin_aq_create_sq_cmd create_cmd;
1202         struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1203         u8 direction;
1204         int ret;
1205 
1206         memset(&create_cmd, 0x0, sizeof(create_cmd));
1207 
1208         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1209 
1210         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1211                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1212         else
1213                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1214 
1215         create_cmd.sq_identity |= (direction <<
1216                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1217                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1218 
1219         create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1220                 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1221 
1222         create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1223                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1224                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1225 
1226         create_cmd.sq_caps_3 |=
1227                 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1228 
1229         create_cmd.cq_idx = cq_idx;
1230         create_cmd.sq_depth = io_sq->q_depth;
1231 
1232         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1233                 ret = ena_com_mem_addr_set(ena_dev,
1234                                            &create_cmd.sq_ba,
1235                                            io_sq->desc_addr.phys_addr);
1236                 if (unlikely(ret)) {
1237                         pr_err("memory address set failed\n");
1238                         return ret;
1239                 }
1240         }
1241 
1242         ret = ena_com_execute_admin_command(admin_queue,
1243                                             (struct ena_admin_aq_entry *)&create_cmd,
1244                                             sizeof(create_cmd),
1245                                             (struct ena_admin_acq_entry *)&cmd_completion,
1246                                             sizeof(cmd_completion));
1247         if (unlikely(ret)) {
1248                 pr_err("Failed to create IO SQ. error: %d\n", ret);
1249                 return ret;
1250         }
1251 
1252         io_sq->idx = cmd_completion.sq_idx;
1253 
1254         io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1255                 (uintptr_t)cmd_completion.sq_doorbell_offset);
1256 
1257         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1258                 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1259                                 + cmd_completion.llq_headers_offset);
1260 
1261                 io_sq->desc_addr.pbuf_dev_addr =
1262                         (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1263                         cmd_completion.llq_descriptors_offset);
1264         }
1265 
1266         pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1267 
1268         return ret;
1269 }
1270 
1271 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1272 {
1273         struct ena_rss *rss = &ena_dev->rss;
1274         struct ena_com_io_sq *io_sq;
1275         u16 qid;
1276         int i;
1277 
1278         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1279                 qid = rss->host_rss_ind_tbl[i];
1280                 if (qid >= ENA_TOTAL_NUM_QUEUES)
1281                         return -EINVAL;
1282 
1283                 io_sq = &ena_dev->io_sq_queues[qid];
1284 
1285                 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1286                         return -EINVAL;
1287 
1288                 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1289         }
1290 
1291         return 0;
1292 }
1293 
1294 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1295                                                  u16 intr_delay_resolution)
1296 {
1297         /* Initial value of intr_delay_resolution might be 0 */
1298         u16 prev_intr_delay_resolution =
1299                 ena_dev->intr_delay_resolution ?
1300                 ena_dev->intr_delay_resolution :
1301                 ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1302 
1303         if (!intr_delay_resolution) {
1304                 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1305                 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1306         }
1307 
1308         /* update Rx */
1309         ena_dev->intr_moder_rx_interval =
1310                 ena_dev->intr_moder_rx_interval *
1311                 prev_intr_delay_resolution /
1312                 intr_delay_resolution;
1313 
1314         /* update Tx */
1315         ena_dev->intr_moder_tx_interval =
1316                 ena_dev->intr_moder_tx_interval *
1317                 prev_intr_delay_resolution /
1318                 intr_delay_resolution;
1319 
1320         ena_dev->intr_delay_resolution = intr_delay_resolution;
1321 }
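
/* Worked example (illustrative note, not part of the original file): with the
 * previous resolution at the 1 usec default (ENA_DEFAULT_INTR_DELAY_RESOLUTION)
 * and a stored Rx interval of 64, switching the device to a 4 usec resolution
 * rescales the interval to 64 * 1 / 4 = 16 units, i.e. still ~64 usec of
 * delay. The integer division truncates, so very small intervals can round
 * down to 0.
 */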
1322 
1323 /*****************************************************************************/
1324 /*******************************      API       ******************************/
1325 /*****************************************************************************/
1326 
1327 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1328                                   struct ena_admin_aq_entry *cmd,
1329                                   size_t cmd_size,
1330                                   struct ena_admin_acq_entry *comp,
1331                                   size_t comp_size)
1332 {
1333         struct ena_comp_ctx *comp_ctx;
1334         int ret;
1335 
1336         comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1337                                             comp, comp_size);
1338         if (IS_ERR(comp_ctx)) {
1339                 if (comp_ctx == ERR_PTR(-ENODEV))
1340                         pr_debug("Failed to submit command [%ld]\n",
1341                                  PTR_ERR(comp_ctx));
1342                 else
1343                         pr_err("Failed to submit command [%ld]\n",
1344                                PTR_ERR(comp_ctx));
1345 
1346                 return PTR_ERR(comp_ctx);
1347         }
1348 
1349         ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1350         if (unlikely(ret)) {
1351                 if (admin_queue->running_state)
1352                         pr_err("Failed to process command. ret = %d\n", ret);
1353                 else
1354                         pr_debug("Failed to process command. ret = %d\n", ret);
1355         }
1356         return ret;
1357 }
1358 
1359 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1360                          struct ena_com_io_cq *io_cq)
1361 {
1362         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1363         struct ena_admin_aq_create_cq_cmd create_cmd;
1364         struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1365         int ret;
1366 
1367         memset(&create_cmd, 0x0, sizeof(create_cmd));
1368 
1369         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1370 
1371         create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1372                 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1373         create_cmd.cq_caps_1 |=
1374                 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1375 
1376         create_cmd.msix_vector = io_cq->msix_vector;
1377         create_cmd.cq_depth = io_cq->q_depth;
1378 
1379         ret = ena_com_mem_addr_set(ena_dev,
1380                                    &create_cmd.cq_ba,
1381                                    io_cq->cdesc_addr.phys_addr);
1382         if (unlikely(ret)) {
1383                 pr_err("memory address set failed\n");
1384                 return ret;
1385         }
1386 
1387         ret = ena_com_execute_admin_command(admin_queue,
1388                                             (struct ena_admin_aq_entry *)&create_cmd,
1389                                             sizeof(create_cmd),
1390                                             (struct ena_admin_acq_entry *)&cmd_completion,
1391                                             sizeof(cmd_completion));
1392         if (unlikely(ret)) {
1393                 pr_err("Failed to create IO CQ. error: %d\n", ret);
1394                 return ret;
1395         }
1396 
1397         io_cq->idx = cmd_completion.cq_idx;
1398 
1399         io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1400                 cmd_completion.cq_interrupt_unmask_register_offset);
1401 
1402         if (cmd_completion.cq_head_db_register_offset)
1403                 io_cq->cq_head_db_reg =
1404                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1405                         cmd_completion.cq_head_db_register_offset);
1406 
1407         if (cmd_completion.numa_node_register_offset)
1408                 io_cq->numa_node_cfg_reg =
1409                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1410                         cmd_completion.numa_node_register_offset);
1411 
1412         pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1413 
1414         return ret;
1415 }
1416 
1417 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1418                             struct ena_com_io_sq **io_sq,
1419                             struct ena_com_io_cq **io_cq)
1420 {
1421         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1422                 pr_err("Invalid queue number %d but the max is %d\n", qid,
1423                        ENA_TOTAL_NUM_QUEUES);
1424                 return -EINVAL;
1425         }
1426 
1427         *io_sq = &ena_dev->io_sq_queues[qid];
1428         *io_cq = &ena_dev->io_cq_queues[qid];
1429 
1430         return 0;
1431 }
1432 
1433 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1434 {
1435         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1436         struct ena_comp_ctx *comp_ctx;
1437         u16 i;
1438 
1439         if (!admin_queue->comp_ctx)
1440                 return;
1441 
1442         for (i = 0; i < admin_queue->q_depth; i++) {
1443                 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1444                 if (unlikely(!comp_ctx))
1445                         break;
1446 
1447                 comp_ctx->status = ENA_CMD_ABORTED;
1448 
1449                 complete(&comp_ctx->wait_event);
1450         }
1451 }
1452 
1453 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1454 {
1455         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1456         unsigned long flags = 0;
1457 
1458         spin_lock_irqsave(&admin_queue->q_lock, flags);
1459         while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1460                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1461                 msleep(ENA_POLL_MS);
1462                 spin_lock_irqsave(&admin_queue->q_lock, flags);
1463         }
1464         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1465 }
1466 
1467 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1468                           struct ena_com_io_cq *io_cq)
1469 {
1470         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1471         struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1472         struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1473         int ret;
1474 
1475         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1476 
1477         destroy_cmd.cq_idx = io_cq->idx;
1478         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1479 
1480         ret = ena_com_execute_admin_command(admin_queue,
1481                                             (struct ena_admin_aq_entry *)&destroy_cmd,
1482                                             sizeof(destroy_cmd),
1483                                             (struct ena_admin_acq_entry *)&destroy_resp,
1484                                             sizeof(destroy_resp));
1485 
1486         if (unlikely(ret && (ret != -ENODEV)))
1487                 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1488 
1489         return ret;
1490 }
1491 
1492 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1493 {
1494         return ena_dev->admin_queue.running_state;
1495 }
1496 
1497 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1498 {
1499         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1500         unsigned long flags = 0;
1501 
1502         spin_lock_irqsave(&admin_queue->q_lock, flags);
1503         ena_dev->admin_queue.running_state = state;
1504         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1505 }
1506 
1507 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1508 {
1509         u16 depth = ena_dev->aenq.q_depth;
1510 
1511         WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1512 
1513         /* Init head_db to mark that all entries in the queue
1514          * are initially available
1515          */
1516         writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1517 }
1518 
1519 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1520 {
1521         struct ena_com_admin_queue *admin_queue;
1522         struct ena_admin_set_feat_cmd cmd;
1523         struct ena_admin_set_feat_resp resp;
1524         struct ena_admin_get_feat_resp get_resp;
1525         int ret;
1526 
1527         ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1528         if (ret) {
1529                 pr_info("Can't get aenq configuration\n");
1530                 return ret;
1531         }
1532 
1533         if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1534                 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x requested flag: 0x%x\n",
1535                         get_resp.u.aenq.supported_groups, groups_flag);
1536                 return -EOPNOTSUPP;
1537         }
1538 
1539         memset(&cmd, 0x0, sizeof(cmd));
1540         admin_queue = &ena_dev->admin_queue;
1541 
1542         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1543         cmd.aq_common_descriptor.flags = 0;
1544         cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1545         cmd.u.aenq.enabled_groups = groups_flag;
1546 
1547         ret = ena_com_execute_admin_command(admin_queue,
1548                                             (struct ena_admin_aq_entry *)&cmd,
1549                                             sizeof(cmd),
1550                                             (struct ena_admin_acq_entry *)&resp,
1551                                             sizeof(resp));
1552 
1553         if (unlikely(ret))
1554                 pr_err("Failed to config AENQ ret: %d\n", ret);
1555 
1556         return ret;
1557 }
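
/* Usage sketch (illustrative, not from this file): a driver usually enables
 * only the AENQ groups it handles, intersected with the groups the device
 * reports as supported. ENA_ADMIN_LINK_CHANGE and ENA_ADMIN_KEEP_ALIVE are
 * assumed here to be members of enum ena_admin_aenq_group, and get_feat_ctx
 * is assumed to have been filled by ena_com_get_dev_attr_feat().
 *
 *	u32 wanted = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_KEEP_ALIVE);
 *	u32 groups = wanted & get_feat_ctx.aenq.supported_groups;
 *
 *	rc = ena_com_set_aenq_config(ena_dev, groups);
 *	if (rc)
 *		pr_warn("Cannot configure aenq groups: %d\n", rc);
 */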
1558 
1559 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1560 {
1561         u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1562         int width;
1563 
1564         if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1565                 pr_err("Reg read timeout occurred\n");
1566                 return -ETIME;
1567         }
1568 
1569         width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1570                 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1571 
1572         pr_debug("ENA dma width: %d\n", width);
1573 
1574         if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1575                 pr_err("DMA width illegal value: %d\n", width);
1576                 return -EINVAL;
1577         }
1578 
1579         ena_dev->dma_addr_bits = width;
1580 
1581         return width;
1582 }
1583 
1584 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1585 {
1586         u32 ver;
1587         u32 ctrl_ver;
1588         u32 ctrl_ver_masked;
1589 
1590         /* Make sure the ENA version and the controller version are at least
1591          * as recent as the driver expects
1592          */
1593         ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1594         ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1595                                           ENA_REGS_CONTROLLER_VERSION_OFF);
1596 
1597         if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1598                      (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1599                 pr_err("Reg read timeout occurred\n");
1600                 return -ETIME;
1601         }
1602 
1603         pr_info("ena device version: %d.%d\n",
1604                 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1605                         ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1606                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1607 
1608         pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1609                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1610                         ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1611                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1612                         ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1613                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1614                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1615                         ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1616 
1617         ctrl_ver_masked =
1618                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1619                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1620                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1621 
1622         /* Validate the ctrl version without the implementation ID */
1623         if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1624                 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1625                 return -1;
1626         }
1627 
1628         return 0;
1629 }
1630 
1631 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1632 {
1633         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1634         struct ena_com_admin_cq *cq = &admin_queue->cq;
1635         struct ena_com_admin_sq *sq = &admin_queue->sq;
1636         struct ena_com_aenq *aenq = &ena_dev->aenq;
1637         u16 size;
1638 
1639         if (admin_queue->comp_ctx)
1640                 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1641         admin_queue->comp_ctx = NULL;
1642         size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1643         if (sq->entries)
1644                 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1645                                   sq->dma_addr);
1646         sq->entries = NULL;
1647 
1648         size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1649         if (cq->entries)
1650                 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1651                                   cq->dma_addr);
1652         cq->entries = NULL;
1653 
1654         size = ADMIN_AENQ_SIZE(aenq->q_depth);
1655         if (ena_dev->aenq.entries)
1656                 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1657                                   aenq->dma_addr);
1658         aenq->entries = NULL;
1659 }
1660 
1661 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1662 {
1663         u32 mask_value = 0;
1664 
1665         if (polling)
1666                 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1667 
1668         writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1669         ena_dev->admin_queue.polling = polling;
1670 }
1671 
1672 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1673                                          bool polling)
1674 {
1675         ena_dev->admin_queue.auto_polling = polling;
1676 }
1677 
1678 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1679 {
1680         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1681 
1682         spin_lock_init(&mmio_read->lock);
1683         mmio_read->read_resp =
1684                 dma_alloc_coherent(ena_dev->dmadev,
1685                                    sizeof(*mmio_read->read_resp),
1686                                    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1687         if (unlikely(!mmio_read->read_resp))
1688                 goto err;
1689 
1690         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1691 
1692         mmio_read->read_resp->req_id = 0x0;
1693         mmio_read->seq_num = 0x0;
1694         mmio_read->readless_supported = true;
1695 
1696         return 0;
1697 
1698 err:
1699 
1700         return -ENOMEM;
1701 }
1702 
1703 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1704 {
1705         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1706 
1707         mmio_read->readless_supported = readless_supported;
1708 }
1709 
1710 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1711 {
1712         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1713 
1714         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1715         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1716 
1717         dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1718                           mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1719 
1720         mmio_read->read_resp = NULL;
1721 }
1722 
1723 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1724 {
1725         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1726         u32 addr_low, addr_high;
1727 
1728         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1729         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1730 
1731         writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1732         writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1733 }
1734 
1735 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1736                        struct ena_aenq_handlers *aenq_handlers)
1737 {
1738         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1739         u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1740         int ret;
1741 
1742         dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1743 
1744         if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1745                 pr_err("Reg read timeout occurred\n");
1746                 return -ETIME;
1747         }
1748 
1749         if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1750                 pr_err("Device isn't ready, abort com init\n");
1751                 return -ENODEV;
1752         }
1753 
1754         admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1755 
1756         admin_queue->q_dmadev = ena_dev->dmadev;
1757         admin_queue->polling = false;
1758         admin_queue->curr_cmd_id = 0;
1759 
1760         atomic_set(&admin_queue->outstanding_cmds, 0);
1761 
1762         spin_lock_init(&admin_queue->q_lock);
1763 
1764         ret = ena_com_init_comp_ctxt(admin_queue);
1765         if (ret)
1766                 goto error;
1767 
1768         ret = ena_com_admin_init_sq(admin_queue);
1769         if (ret)
1770                 goto error;
1771 
1772         ret = ena_com_admin_init_cq(admin_queue);
1773         if (ret)
1774                 goto error;
1775 
1776         admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1777                 ENA_REGS_AQ_DB_OFF);
1778 
1779         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1780         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1781 
1782         writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1783         writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1784 
1785         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1786         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1787 
1788         writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1789         writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1790 
1791         aq_caps = 0;
1792         aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1793         aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1794                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1795                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1796 
1797         acq_caps = 0;
1798         acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1799         acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1800                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1801                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1802 
1803         writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1804         writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1805         ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1806         if (ret)
1807                 goto error;
1808 
1809         admin_queue->running_state = true;
1810 
1811         return 0;
1812 error:
1813         ena_com_admin_destroy(ena_dev);
1814 
1815         return ret;
1816 }
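
/* Bring-up sketch (illustrative, not from this file): a condensed order a
 * driver might follow before using the admin queue; my_aenq_handlers is a
 * caller-defined placeholder table, and error handling is abbreviated.
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_validate_version(ena_dev);
 *	if (!rc)
 *		rc = ena_com_admin_init(ena_dev, &my_aenq_handlers);
 *	if (rc) {
 *		ena_com_mmio_reg_read_request_destroy(ena_dev);
 *		return rc;
 *	}
 *
 * On teardown the driver calls ena_com_admin_destroy() followed by
 * ena_com_mmio_reg_read_request_destroy().
 */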
1817 
1818 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1819                             struct ena_com_create_io_ctx *ctx)
1820 {
1821         struct ena_com_io_sq *io_sq;
1822         struct ena_com_io_cq *io_cq;
1823         int ret;
1824 
1825         if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1826                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1827                        ctx->qid, ENA_TOTAL_NUM_QUEUES);
1828                 return -EINVAL;
1829         }
1830 
1831         io_sq = &ena_dev->io_sq_queues[ctx->qid];
1832         io_cq = &ena_dev->io_cq_queues[ctx->qid];
1833 
1834         memset(io_sq, 0x0, sizeof(*io_sq));
1835         memset(io_cq, 0x0, sizeof(*io_cq));
1836 
1837         /* Init CQ */
1838         io_cq->q_depth = ctx->queue_size;
1839         io_cq->direction = ctx->direction;
1840         io_cq->qid = ctx->qid;
1841 
1842         io_cq->msix_vector = ctx->msix_vector;
1843 
1844         io_sq->q_depth = ctx->queue_size;
1845         io_sq->direction = ctx->direction;
1846         io_sq->qid = ctx->qid;
1847 
1848         io_sq->mem_queue_type = ctx->mem_queue_type;
1849 
1850         if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1851                 /* header length is limited to 8 bits */
1852                 io_sq->tx_max_header_size =
1853                         min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1854 
1855         ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1856         if (ret)
1857                 goto error;
1858         ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1859         if (ret)
1860                 goto error;
1861 
1862         ret = ena_com_create_io_cq(ena_dev, io_cq);
1863         if (ret)
1864                 goto error;
1865 
1866         ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1867         if (ret)
1868                 goto destroy_io_cq;
1869 
1870         return 0;
1871 
1872 destroy_io_cq:
1873         ena_com_destroy_io_cq(ena_dev, io_cq);
1874 error:
1875         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1876         return ret;
1877 }
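
/* Usage sketch (illustrative, not from this file): creating a host-placement
 * Tx queue and fetching its SQ/CQ handles; the field values are examples only.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= 0,
 *		.queue_size	= 1024,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= 0,
 *	};
 *	struct ena_com_io_sq *io_sq;
 *	struct ena_com_io_cq *io_cq;
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (!rc)
 *		rc = ena_com_get_io_handlers(ena_dev, ctx.qid, &io_sq, &io_cq);
 *
 * The queue is released later with ena_com_destroy_io_queue(ena_dev, ctx.qid).
 */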
1878 
1879 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1880 {
1881         struct ena_com_io_sq *io_sq;
1882         struct ena_com_io_cq *io_cq;
1883 
1884         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1885                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1886                        ENA_TOTAL_NUM_QUEUES);
1887                 return;
1888         }
1889 
1890         io_sq = &ena_dev->io_sq_queues[qid];
1891         io_cq = &ena_dev->io_cq_queues[qid];
1892 
1893         ena_com_destroy_io_sq(ena_dev, io_sq);
1894         ena_com_destroy_io_cq(ena_dev, io_cq);
1895 
1896         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1897 }
1898 
1899 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1900                             struct ena_admin_get_feat_resp *resp)
1901 {
1902         return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1903 }
1904 
1905 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1906                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
1907 {
1908         struct ena_admin_get_feat_resp get_resp;
1909         int rc;
1910 
1911         rc = ena_com_get_feature(ena_dev, &get_resp,
1912                                  ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1913         if (rc)
1914                 return rc;
1915 
1916         memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1917                sizeof(get_resp.u.dev_attr));
1918         ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1919 
1920         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1921                 rc = ena_com_get_feature(ena_dev, &get_resp,
1922                                          ENA_ADMIN_MAX_QUEUES_EXT,
1923                                          ENA_FEATURE_MAX_QUEUE_EXT_VER);
1924                 if (rc)
1925                         return rc;
1926 
1927                 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1928                         return -EINVAL;
1929 
1930                 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1931                        sizeof(get_resp.u.max_queue_ext));
1932                 ena_dev->tx_max_header_size =
1933                         get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1934         } else {
1935                 rc = ena_com_get_feature(ena_dev, &get_resp,
1936                                          ENA_ADMIN_MAX_QUEUES_NUM, 0);
1937                 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1938                        sizeof(get_resp.u.max_queue));
1939                 ena_dev->tx_max_header_size =
1940                         get_resp.u.max_queue.max_header_size;
1941 
1942                 if (rc)
1943                         return rc;
1944         }
1945 
1946         rc = ena_com_get_feature(ena_dev, &get_resp,
1947                                  ENA_ADMIN_AENQ_CONFIG, 0);
1948         if (rc)
1949                 return rc;
1950 
1951         memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1952                sizeof(get_resp.u.aenq));
1953 
1954         rc = ena_com_get_feature(ena_dev, &get_resp,
1955                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1956         if (rc)
1957                 return rc;
1958 
1959         memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1960                sizeof(get_resp.u.offload));
1961 
1962         /* Driver hints isn't a mandatory admin command, so if the
1963          * command isn't supported, set the driver hints to 0
1964          */
1965         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1966 
1967         if (!rc)
1968                 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1969                        sizeof(get_resp.u.hw_hints));
1970         else if (rc == -EOPNOTSUPP)
1971                 memset(&get_feat_ctx->hw_hints, 0x0,
1972                        sizeof(get_feat_ctx->hw_hints));
1973         else
1974                 return rc;
1975 
1976         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1977         if (!rc)
1978                 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1979                        sizeof(get_resp.u.llq));
1980         else if (rc == -EOPNOTSUPP)
1981                 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1982         else
1983                 return rc;
1984 
1985         return 0;
1986 }
1987 
1988 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1989 {
1990         ena_com_handle_admin_completion(&ena_dev->admin_queue);
1991 }
1992 
1993 /* ena_com_get_specific_aenq_cb:
1994  * Return the handler that is relevant to the specified event group
1995  */
1996 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1997                                                      u16 group)
1998 {
1999         struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
2000 
2001         if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2002                 return aenq_handlers->handlers[group];
2003 
2004         return aenq_handlers->unimplemented_handler;
2005 }
2006 
2007 /* ena_com_aenq_intr_handler:
2008  * Handles the incoming AENQ events.
2009  * Pops events from the queue and applies the matching handler.
2010  */
2011 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2012 {
2013         struct ena_admin_aenq_entry *aenq_e;
2014         struct ena_admin_aenq_common_desc *aenq_common;
2015         struct ena_com_aenq *aenq  = &dev->aenq;
2016         unsigned long long timestamp;
2017         ena_aenq_handler handler_cb;
2018         u16 masked_head, processed = 0;
2019         u8 phase;
2020 
2021         masked_head = aenq->head & (aenq->q_depth - 1);
2022         phase = aenq->phase;
2023         aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2024         aenq_common = &aenq_e->aenq_common_desc;
2025 
2026         /* Go over all the events */
2027         while ((READ_ONCE(aenq_common->flags) &
2028                 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2029                 /* Make sure the phase bit (ownership) is as expected before
2030                  * reading the rest of the descriptor.
2031                  */
2032                 dma_rmb();
2033 
2034                 timestamp =
2035                         (unsigned long long)aenq_common->timestamp_low |
2036                         ((unsigned long long)aenq_common->timestamp_high << 32);
2037                 pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2038                          aenq_common->group, aenq_common->syndrom, timestamp);
2039 
2040                 /* Handle the specific event */
2041                 handler_cb = ena_com_get_specific_aenq_cb(dev,
2042                                                           aenq_common->group);
2043                 handler_cb(data, aenq_e); /* call the actual event handler */
2044 
2045                 /* Get next event entry */
2046                 masked_head++;
2047                 processed++;
2048 
2049                 if (unlikely(masked_head == aenq->q_depth)) {
2050                         masked_head = 0;
2051                         phase = !phase;
2052                 }
2053                 aenq_e = &aenq->entries[masked_head];
2054                 aenq_common = &aenq_e->aenq_common_desc;
2055         }
2056 
2057         aenq->head += processed;
2058         aenq->phase = phase;
2059 
2060         /* Don't update aenq doorbell if there weren't any processed events */
2061         if (!processed)
2062                 return;
2063 
2064         /* write the aenq doorbell after all AENQ descriptors were read */
2065         mb();
2066         writel_relaxed((u32)aenq->head,
2067                        dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2068 }
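
/* Usage sketch (illustrative, not from this file): the handler table consulted
 * above is supplied by the caller at ena_com_admin_init() time. The callback
 * names are placeholders, and ENA_ADMIN_LINK_CHANGE is assumed to be an AENQ
 * group index from ena_admin_defs.h; groups without an entry fall through to
 * unimplemented_handler.
 *
 *	static void my_link_change_cb(void *data,
 *				      struct ena_admin_aenq_entry *aenq_e)
 *	{ ... }
 *
 *	static void my_unimplemented_cb(void *data,
 *					struct ena_admin_aenq_entry *aenq_e)
 *	{ ... }
 *
 *	static struct ena_aenq_handlers my_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 */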
2069 
2070 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2071                       enum ena_regs_reset_reason_types reset_reason)
2072 {
2073         u32 stat, timeout, cap, reset_val;
2074         int rc;
2075 
2076         stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2077         cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2078 
2079         if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2080                      (cap == ENA_MMIO_READ_TIMEOUT))) {
2081                 pr_err("Reg read32 timeout occurred\n");
2082                 return -ETIME;
2083         }
2084 
2085         if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2086                 pr_err("Device isn't ready, can't reset device\n");
2087                 return -EINVAL;
2088         }
2089 
2090         timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2091                         ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2092         if (timeout == 0) {
2093                 pr_err("Invalid timeout value\n");
2094                 return -EINVAL;
2095         }
2096 
2097         /* start reset */
2098         reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2099         reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2100                      ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2101         writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2102 
2103         /* Write again the MMIO read request address */
2104         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2105 
2106         rc = wait_for_reset_state(ena_dev, timeout,
2107                                   ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2108         if (rc != 0) {
2109                 pr_err("Reset indication didn't turn on\n");
2110                 return rc;
2111         }
2112 
2113         /* reset done */
2114         writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2115         rc = wait_for_reset_state(ena_dev, timeout, 0);
2116         if (rc != 0) {
2117                 pr_err("Reset indication didn't turn off\n");
2118                 return rc;
2119         }
2120 
2121         timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2122                 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2123         if (timeout)
2124                 /* the resolution of timeout reg is 100ms */
2125                 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2126         else
2127                 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2128 
2129         return 0;
2130 }
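
/* Recovery sketch (illustrative, not from this file): a teardown/reset path
 * typically marks the admin queue as not running, resets the device and only
 * then flushes any outstanding admin commands. ENA_REGS_RESET_NORMAL is
 * assumed to be one of enum ena_regs_reset_reason_types.
 *
 *	ena_com_set_admin_running_state(ena_dev, false);
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (rc)
 *		pr_err("Device reset failed: %d\n", rc);
 *
 *	ena_com_abort_admin_commands(ena_dev);
 *	ena_com_wait_for_abort_completion(ena_dev);
 *	ena_com_admin_destroy(ena_dev);
 */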
2131 
2132 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2133                              struct ena_com_stats_ctx *ctx,
2134                              enum ena_admin_get_stats_type type)
2135 {
2136         struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2137         struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2138         struct ena_com_admin_queue *admin_queue;
2139         int ret;
2140 
2141         admin_queue = &ena_dev->admin_queue;
2142 
2143         get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2144         get_cmd->aq_common_descriptor.flags = 0;
2145         get_cmd->type = type;
2146 
2147         ret =  ena_com_execute_admin_command(admin_queue,
2148                                              (struct ena_admin_aq_entry *)get_cmd,
2149                                              sizeof(*get_cmd),
2150                                              (struct ena_admin_acq_entry *)get_resp,
2151                                              sizeof(*get_resp));
2152 
2153         if (unlikely(ret))
2154                 pr_err("Failed to get stats. error: %d\n", ret);
2155 
2156         return ret;
2157 }
2158 
2159 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2160                                 struct ena_admin_basic_stats *stats)
2161 {
2162         struct ena_com_stats_ctx ctx;
2163         int ret;
2164 
2165         memset(&ctx, 0x0, sizeof(ctx));
2166         ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2167         if (likely(ret == 0))
2168                 memcpy(stats, &ctx.get_resp.basic_stats,
2169                        sizeof(ctx.get_resp.basic_stats));
2170 
2171         return ret;
2172 }
2173 
2174 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2175 {
2176         struct ena_com_admin_queue *admin_queue;
2177         struct ena_admin_set_feat_cmd cmd;
2178         struct ena_admin_set_feat_resp resp;
2179         int ret;
2180 
2181         if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2182                 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2183                 return -EOPNOTSUPP;
2184         }
2185 
2186         memset(&cmd, 0x0, sizeof(cmd));
2187         admin_queue = &ena_dev->admin_queue;
2188 
2189         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2190         cmd.aq_common_descriptor.flags = 0;
2191         cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2192         cmd.u.mtu.mtu = mtu;
2193 
2194         ret = ena_com_execute_admin_command(admin_queue,
2195                                             (struct ena_admin_aq_entry *)&cmd,
2196                                             sizeof(cmd),
2197                                             (struct ena_admin_acq_entry *)&resp,
2198                                             sizeof(resp));
2199 
2200         if (unlikely(ret))
2201                 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2202 
2203         return ret;
2204 }
2205 
2206 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2207                                  struct ena_admin_feature_offload_desc *offload)
2208 {
2209         int ret;
2210         struct ena_admin_get_feat_resp resp;
2211 
2212         ret = ena_com_get_feature(ena_dev, &resp,
2213                                   ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2214         if (unlikely(ret)) {
2215                 pr_err("Failed to get offload capabilities %d\n", ret);
2216                 return ret;
2217         }
2218 
2219         memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2220 
2221         return 0;
2222 }
2223 
2224 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2225 {
2226         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2227         struct ena_rss *rss = &ena_dev->rss;
2228         struct ena_admin_set_feat_cmd cmd;
2229         struct ena_admin_set_feat_resp resp;
2230         struct ena_admin_get_feat_resp get_resp;
2231         int ret;
2232 
2233         if (!ena_com_check_supported_feature_id(ena_dev,
2234                                                 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2235                 pr_debug("Feature %d isn't supported\n",
2236                          ENA_ADMIN_RSS_HASH_FUNCTION);
2237                 return -EOPNOTSUPP;
2238         }
2239 
2240         /* Validate hash function is supported */
2241         ret = ena_com_get_feature(ena_dev, &get_resp,
2242                                   ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2243         if (unlikely(ret))
2244                 return ret;
2245 
2246         if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2247                 pr_err("Func hash %d isn't supported by device, abort\n",
2248                        rss->hash_func);
2249                 return -EOPNOTSUPP;
2250         }
2251 
2252         memset(&cmd, 0x0, sizeof(cmd));
2253 
2254         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2255         cmd.aq_common_descriptor.flags =
2256                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2257         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2258         cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2259         cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2260 
2261         ret = ena_com_mem_addr_set(ena_dev,
2262                                    &cmd.control_buffer.address,
2263                                    rss->hash_key_dma_addr);
2264         if (unlikely(ret)) {
2265                 pr_err("memory address set failed\n");
2266                 return ret;
2267         }
2268 
2269         cmd.control_buffer.length = sizeof(*rss->hash_key);
2270 
2271         ret = ena_com_execute_admin_command(admin_queue,
2272                                             (struct ena_admin_aq_entry *)&cmd,
2273                                             sizeof(cmd),
2274                                             (struct ena_admin_acq_entry *)&resp,
2275                                             sizeof(resp));
2276         if (unlikely(ret)) {
2277                 pr_err("Failed to set hash function %d. error: %d\n",
2278                        rss->hash_func, ret);
2279                 return -EINVAL;
2280         }
2281 
2282         return 0;
2283 }
2284 
2285 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2286                                enum ena_admin_hash_functions func,
2287                                const u8 *key, u16 key_len, u32 init_val)
2288 {
2289         struct ena_rss *rss = &ena_dev->rss;
2290         struct ena_admin_get_feat_resp get_resp;
2291         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2292                 rss->hash_key;
2293         int rc;
2294 
2295         /* Make sure the key size is a multiple of DWORDs */
2296         if (unlikely(key_len & 0x3))
2297                 return -EINVAL;
2298 
2299         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2300                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2301                                     rss->hash_key_dma_addr,
2302                                     sizeof(*rss->hash_key), 0);
2303         if (unlikely(rc))
2304                 return rc;
2305 
2306         if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2307                 pr_err("Flow hash function %d isn't supported\n", func);
2308                 return -EOPNOTSUPP;
2309         }
2310 
2311         switch (func) {
2312         case ENA_ADMIN_TOEPLITZ:
2313                 if (key) {
2314                         if (key_len != sizeof(hash_key->key)) {
2315                                 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2316                                        key_len, sizeof(hash_key->key));
2317                                 return -EINVAL;
2318                         }
2319                         memcpy(hash_key->key, key, key_len);
2320                         rss->hash_init_val = init_val;
2321                         hash_key->keys_num = key_len >> 2;
2322                 }
2323                 break;
2324         case ENA_ADMIN_CRC32:
2325                 rss->hash_init_val = init_val;
2326                 break;
2327         default:
2328                 pr_err("Invalid hash function (%d)\n", func);
2329                 return -EINVAL;
2330         }
2331 
2332         rss->hash_func = func;
2333         rc = ena_com_set_hash_function(ena_dev);
2334 
2335         /* Restore the old function */
2336         if (unlikely(rc))
2337                 ena_com_get_hash_function(ena_dev, NULL, NULL);
2338 
2339         return rc;
2340 }
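
/* Usage sketch (illustrative, not from this file): programming a Toeplitz hash
 * with a caller-supplied key. The 40-byte length is an assumption about
 * sizeof(hash_key->key); ENA_ADMIN_TOEPLITZ rejects any other length, and
 * ENA_ADMIN_CRC32 ignores the key entirely.
 *
 *	static const u8 my_rss_key[40] = { 0x6d, 0x5a, 0x56, 0xda, ... };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					my_rss_key, sizeof(my_rss_key),
 *					0xffffffff);
 *	if (rc && rc != -EOPNOTSUPP)
 *		pr_err("Cannot set hash function: %d\n", rc);
 */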
2341 
2342 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2343                               enum ena_admin_hash_functions *func,
2344                               u8 *key)
2345 {
2346         struct ena_rss *rss = &ena_dev->rss;
2347         struct ena_admin_get_feat_resp get_resp;
2348         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2349                 rss->hash_key;
2350         int rc;
2351 
2352         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2353                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2354                                     rss->hash_key_dma_addr,
2355                                     sizeof(*rss->hash_key), 0);
2356         if (unlikely(rc))
2357                 return rc;
2358 
2359         /* ffs() returns 1 in case the lsb is set */
2360         rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2361         if (rss->hash_func)
2362                 rss->hash_func--;
2363 
2364         if (func)
2365                 *func = rss->hash_func;
2366 
2367         if (key)
2368                 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2369 
2370         return 0;
2371 }
2372 
2373 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2374                           enum ena_admin_flow_hash_proto proto,
2375                           u16 *fields)
2376 {
2377         struct ena_rss *rss = &ena_dev->rss;
2378         struct ena_admin_get_feat_resp get_resp;
2379         int rc;
2380 
2381         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2382                                     ENA_ADMIN_RSS_HASH_INPUT,
2383                                     rss->hash_ctrl_dma_addr,
2384                                     sizeof(*rss->hash_ctrl), 0);
2385         if (unlikely(rc))
2386                 return rc;
2387 
2388         if (fields)
2389                 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2390 
2391         return 0;
2392 }
2393 
2394 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2395 {
2396         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2397         struct ena_rss *rss = &ena_dev->rss;
2398         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2399         struct ena_admin_set_feat_cmd cmd;
2400         struct ena_admin_set_feat_resp resp;
2401         int ret;
2402 
2403         if (!ena_com_check_supported_feature_id(ena_dev,
2404                                                 ENA_ADMIN_RSS_HASH_INPUT)) {
2405                 pr_debug("Feature %d isn't supported\n",
2406                          ENA_ADMIN_RSS_HASH_INPUT);
2407                 return -EOPNOTSUPP;
2408         }
2409 
2410         memset(&cmd, 0x0, sizeof(cmd));
2411 
2412         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2413         cmd.aq_common_descriptor.flags =
2414                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2415         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2416         cmd.u.flow_hash_input.enabled_input_sort =
2417                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2418                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2419 
2420         ret = ena_com_mem_addr_set(ena_dev,
2421                                    &cmd.control_buffer.address,
2422                                    rss->hash_ctrl_dma_addr);
2423         if (unlikely(ret)) {
2424                 pr_err("memory address set failed\n");
2425                 return ret;
2426         }
2427         cmd.control_buffer.length = sizeof(*hash_ctrl);
2428 
2429         ret = ena_com_execute_admin_command(admin_queue,
2430                                             (struct ena_admin_aq_entry *)&cmd,
2431                                             sizeof(cmd),
2432                                             (struct ena_admin_acq_entry *)&resp,
2433                                             sizeof(resp));
2434         if (unlikely(ret))
2435                 pr_err("Failed to set hash input. error: %d\n", ret);
2436 
2437         return ret;
2438 }
2439 
2440 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2441 {
2442         struct ena_rss *rss = &ena_dev->rss;
2443         struct ena_admin_feature_rss_hash_control *hash_ctrl =
2444                 rss->hash_ctrl;
2445         u16 available_fields = 0;
2446         int rc, i;
2447 
2448         /* Get the supported hash input */
2449         rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2450         if (unlikely(rc))
2451                 return rc;
2452 
2453         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2454                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2455                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2456 
2457         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2458                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2459                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2460 
2461         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2462                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2463                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2464 
2465         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2466                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2467                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2468 
2469         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2470                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2471 
2472         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2473                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2474 
2475         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2476                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2477 
2478         hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2479                 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2480 
2481         for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2482                 available_fields = hash_ctrl->selected_fields[i].fields &
2483                                 hash_ctrl->supported_fields[i].fields;
2484                 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2485                         pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2486                                i, hash_ctrl->supported_fields[i].fields,
2487                                hash_ctrl->selected_fields[i].fields);
2488                         return -EOPNOTSUPP;
2489                 }
2490         }
2491 
2492         rc = ena_com_set_hash_ctrl(ena_dev);
2493 
2494         /* In case of failure, restore the old hash ctrl */
2495         if (unlikely(rc))
2496                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2497 
2498         return rc;
2499 }
2500 
2501 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2502                            enum ena_admin_flow_hash_proto proto,
2503                            u16 hash_fields)
2504 {
2505         struct ena_rss *rss = &ena_dev->rss;
2506         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2507         u16 supported_fields;
2508         int rc;
2509 
2510         if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2511                 pr_err("Invalid proto num (%u)\n", proto);
2512                 return -EINVAL;
2513         }
2514 
2515         /* Get the ctrl table */
2516         rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2517         if (unlikely(rc))
2518                 return rc;
2519 
2520         /* Make sure all the fields are supported */
2521         supported_fields = hash_ctrl->supported_fields[proto].fields;
2522         if ((hash_fields & supported_fields) != hash_fields) {
2523                 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2524                        proto, hash_fields, supported_fields);
2525         }
2526 
2527         hash_ctrl->selected_fields[proto].fields = hash_fields;
2528 
2529         rc = ena_com_set_hash_ctrl(ena_dev);
2530 
2531         /* In case of failure, restore the old hash ctrl */
2532         if (unlikely(rc))
2533                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2534 
2535         return rc;
2536 }
2537 
2538 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2539                                       u16 entry_idx, u16 entry_value)
2540 {
2541         struct ena_rss *rss = &ena_dev->rss;
2542 
2543         if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2544                 return -EINVAL;
2545 
2546         if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2547                 return -EINVAL;
2548 
2549         rss->host_rss_ind_tbl[entry_idx] = entry_value;
2550 
2551         return 0;
2552 }
2553 
2554 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2555 {
2556         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2557         struct ena_rss *rss = &ena_dev->rss;
2558         struct ena_admin_set_feat_cmd cmd;
2559         struct ena_admin_set_feat_resp resp;
2560         int ret;
2561 
2562         if (!ena_com_check_supported_feature_id(
2563                     ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2564                 pr_debug("Feature %d isn't supported\n",
2565                          ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2566                 return -EOPNOTSUPP;
2567         }
2568 
2569         ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2570         if (ret) {
2571                 pr_err("Failed to convert host indirection table to device table\n");
2572                 return ret;
2573         }
2574 
2575         memset(&cmd, 0x0, sizeof(cmd));
2576 
2577         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2578         cmd.aq_common_descriptor.flags =
2579                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2580         cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2581         cmd.u.ind_table.size = rss->tbl_log_size;
2582         cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2583 
2584         ret = ena_com_mem_addr_set(ena_dev,
2585                                    &cmd.control_buffer.address,
2586                                    rss->rss_ind_tbl_dma_addr);
2587         if (unlikely(ret)) {
2588                 pr_err("memory address set failed\n");
2589                 return ret;
2590         }
2591 
2592         cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2593                 sizeof(struct ena_admin_rss_ind_table_entry);
2594 
2595         ret = ena_com_execute_admin_command(admin_queue,
2596                                             (struct ena_admin_aq_entry *)&cmd,
2597                                             sizeof(cmd),
2598                                             (struct ena_admin_acq_entry *)&resp,
2599                                             sizeof(resp));
2600 
2601         if (unlikely(ret))
2602                 pr_err("Failed to set indirect table. error: %d\n", ret);
2603 
2604         return ret;
2605 }
2606 
2607 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2608 {
2609         struct ena_rss *rss = &ena_dev->rss;
2610         struct ena_admin_get_feat_resp get_resp;
2611         u32 tbl_size;
2612         int i, rc;
2613 
2614         tbl_size = (1ULL << rss->tbl_log_size) *
2615                 sizeof(struct ena_admin_rss_ind_table_entry);
2616 
2617         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2618                                     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2619                                     rss->rss_ind_tbl_dma_addr,
2620                                     tbl_size, 0);
2621         if (unlikely(rc))
2622                 return rc;
2623 
2624         if (!ind_tbl)
2625                 return 0;
2626 
2627         for (i = 0; i < (1 << rss->tbl_log_size); i++)
2628                 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2629 
2630         return 0;
2631 }
2632 
2633 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2634 {
2635         int rc;
2636 
2637         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2638 
2639         rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2640         if (unlikely(rc))
2641                 goto err_indr_tbl;
2642 
2643         /* The following function might return -EOPNOTSUPP if the device
2644          * doesn't support setting the hash key / hash function. We can safely
2645          * ignore this error and fall back to indirection table support only.
2646          */
2647         rc = ena_com_hash_key_allocate(ena_dev);
2648         if (unlikely(rc) && rc != -EOPNOTSUPP)
2649                 goto err_hash_key;
2650         else if (rc != -EOPNOTSUPP)
2651                 ena_com_hash_key_fill_default_key(ena_dev);
2652 
2653         rc = ena_com_hash_ctrl_init(ena_dev);
2654         if (unlikely(rc))
2655                 goto err_hash_ctrl;
2656 
2657         return 0;
2658 
2659 err_hash_ctrl:
2660         ena_com_hash_key_destroy(ena_dev);
2661 err_hash_key:
2662         ena_com_indirect_table_destroy(ena_dev);
2663 err_indr_tbl:
2664 
2665         return rc;
2666 }
2667 
2668 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2669 {
2670         ena_com_indirect_table_destroy(ena_dev);
2671         ena_com_hash_key_destroy(ena_dev);
2672         ena_com_hash_ctrl_destroy(ena_dev);
2673 
2674         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2675 }
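/* Editor's note: an illustrative sketch, not part of ena_com.c. It shows the
 * expected pairing of ena_com_rss_init() and ena_com_rss_destroy(): init
 * succeeds even when the device lacks hash-key support (see the comment
 * inside ena_com_rss_init() above), leaving indirection-table-only RSS.
 * The example_* names and the log-size value are assumptions.
 */
static int example_rss_setup(struct ena_com_dev *ena_dev)
{
        int rc;

        /* 2^7 = 128 indirection entries; real drivers pick their own size */
        rc = ena_com_rss_init(ena_dev, 7);
        if (unlikely(rc)) {
                pr_err("Cannot init RSS rc: %d\n", rc);
                return rc;
        }

        /* ... fill the indirection table / hash function here ... */

        return 0;
}

static void example_rss_teardown(struct ena_com_dev *ena_dev)
{
        /* Frees the indirection table, hash key and hash control buffers */
        ena_com_rss_destroy(ena_dev);
}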
2676 
2677 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2678 {
2679         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2680 
2681         host_attr->host_info =
2682                 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2683                                    &host_attr->host_info_dma_addr, GFP_KERNEL);
2684         if (unlikely(!host_attr->host_info))
2685                 return -ENOMEM;
2686 
2687         host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2688                 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2689                 (ENA_COMMON_SPEC_VERSION_MINOR));
2690 
2691         return 0;
2692 }
2693 
2694 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2695                                 u32 debug_area_size)
2696 {
2697         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2698 
2699         host_attr->debug_area_virt_addr =
2700                 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2701                                    &host_attr->debug_area_dma_addr,
2702                                    GFP_KERNEL);
2703         if (unlikely(!host_attr->debug_area_virt_addr)) {
2704                 host_attr->debug_area_size = 0;
2705                 return -ENOMEM;
2706         }
2707 
2708         host_attr->debug_area_size = debug_area_size;
2709 
2710         return 0;
2711 }
2712 
2713 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2714 {
2715         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2716 
2717         if (host_attr->host_info) {
2718                 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2719                                   host_attr->host_info_dma_addr);
2720                 host_attr->host_info = NULL;
2721         }
2722 }
2723 
2724 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2725 {
2726         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2727 
2728         if (host_attr->debug_area_virt_addr) {
2729                 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2730                                   host_attr->debug_area_virt_addr,
2731                                   host_attr->debug_area_dma_addr);
2732                 host_attr->debug_area_virt_addr = NULL;
2733         }
2734 }
2735 
2736 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2737 {
2738         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2739         struct ena_com_admin_queue *admin_queue;
2740         struct ena_admin_set_feat_cmd cmd;
2741         struct ena_admin_set_feat_resp resp;
2742 
2743         int ret;
2744 
2745         /* Host attribute config is called before ena_com_get_dev_attr_feat
2746          * so ena_com can't check if the feature is supported.
2747          */
2748 
2749         memset(&cmd, 0x0, sizeof(cmd));
2750         admin_queue = &ena_dev->admin_queue;
2751 
2752         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2753         cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2754 
2755         ret = ena_com_mem_addr_set(ena_dev,
2756                                    &cmd.u.host_attr.debug_ba,
2757                                    host_attr->debug_area_dma_addr);
2758         if (unlikely(ret)) {
2759                 pr_err("memory address set failed\n");
2760                 return ret;
2761         }
2762 
2763         ret = ena_com_mem_addr_set(ena_dev,
2764                                    &cmd.u.host_attr.os_info_ba,
2765                                    host_attr->host_info_dma_addr);
2766         if (unlikely(ret)) {
2767                 pr_err("memory address set failed\n");
2768                 return ret;
2769         }
2770 
2771         cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2772 
2773         ret = ena_com_execute_admin_command(admin_queue,
2774                                             (struct ena_admin_aq_entry *)&cmd,
2775                                             sizeof(cmd),
2776                                             (struct ena_admin_acq_entry *)&resp,
2777                                             sizeof(resp));
2778 
2779         if (unlikely(ret))
2780                 pr_err("Failed to set host attributes: %d\n", ret);
2781 
2782         return ret;
2783 }
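/* Editor's note: an illustrative sketch, not part of ena_com.c. It shows the
 * usual ordering of the host-attribute helpers above: allocate the host-info
 * page and a debug area, push both base addresses to the device, and release
 * them if the device rejects the command. The example_* name and the 4 KB
 * debug-area size are assumptions; because this runs before
 * ena_com_get_dev_attr_feat(), the caller may choose to tolerate an
 * unsupported-feature error instead of failing hard.
 */
static int example_config_host_attributes(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_allocate_host_info(ena_dev);
        if (unlikely(rc))
                return rc;

        rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
        if (unlikely(rc))
                goto err_debug_area;

        /* ... fill ena_dev->host_attr.host_info (OS type, driver version) ... */

        rc = ena_com_set_host_attributes(ena_dev);
        if (unlikely(rc))
                goto err_set_attr;

        return 0;

err_set_attr:
        ena_com_delete_debug_area(ena_dev);
err_debug_area:
        ena_com_delete_host_info(ena_dev);
        return rc;
}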
2784 
2785 /* Interrupt moderation */
2786 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2787 {
2788         return ena_com_check_supported_feature_id(ena_dev,
2789                                                   ENA_ADMIN_INTERRUPT_MODERATION);
2790 }
2791 
2792 static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2793                                                           u32 intr_delay_resolution,
2794                                                           u32 *intr_moder_interval)
2795 {
2796         if (!intr_delay_resolution) {
2797                 pr_err("Illegal interrupt delay granularity value\n");
2798                 return -EFAULT;
2799         }
2800 
2801         *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2802 
2803         return 0;
2804 }
2805 
2806 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2807                                                       u32 tx_coalesce_usecs)
2808 {
2809         return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2810                                                               ena_dev->intr_delay_resolution,
2811                                                               &ena_dev->intr_moder_tx_interval);
2812 }
2813 
2814 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2815                                                       u32 rx_coalesce_usecs)
2816 {
2817         return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2818                                                               ena_dev->intr_delay_resolution,
2819                                                               &ena_dev->intr_moder_rx_interval);
2820 }
2821 
2822 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2823 {
2824         struct ena_admin_get_feat_resp get_resp;
2825         u16 delay_resolution;
2826         int rc;
2827 
2828         rc = ena_com_get_feature(ena_dev, &get_resp,
2829                                  ENA_ADMIN_INTERRUPT_MODERATION, 0);
2830 
2831         if (rc) {
2832                 if (rc == -EOPNOTSUPP) {
2833                         pr_debug("Feature %d isn't supported\n",
2834                                  ENA_ADMIN_INTERRUPT_MODERATION);
2835                         rc = 0;
2836                 } else {
2837                         pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2838                                rc);
2839                 }
2840 
2841                 /* no moderation supported, disable adaptive support */
2842                 ena_com_disable_adaptive_moderation(ena_dev);
2843                 return rc;
2844         }
2845 
2846         /* If moderation is supported, store the device's interrupt delay resolution */
2847         delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2848         ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2849 
2850         /* Disable adaptive moderation by default - can be enabled later */
2851         ena_com_disable_adaptive_moderation(ena_dev);
2852 
2853         return 0;
2854 }
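/* Editor's note: an illustrative sketch, not part of ena_com.c. It shows how a
 * caller might use the non-adaptive moderation helpers above once
 * ena_com_init_interrupt_moderation() has fetched the device's delay
 * resolution. The helpers simply store coalesce_usecs / intr_delay_resolution
 * (e.g. 64 usecs with a 4-usec resolution gives an interval of 16). The
 * example_* name and the 64-usec values are assumptions.
 */
static int example_set_coalescing(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_init_interrupt_moderation(ena_dev);
        if (unlikely(rc))
                return rc;

        /* Without moderation support the delay resolution is never set */
        if (!ena_com_interrupt_moderation_supported(ena_dev))
                return 0;

        rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
        if (unlikely(rc))
                return rc;

        return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
}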
2855 
2856 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2857 {
2858         return ena_dev->intr_moder_tx_interval;
2859 }
2860 
2861 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2862 {
2863         return ena_dev->intr_moder_rx_interval;
2864 }
2865 
2866 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2867                             struct ena_admin_feature_llq_desc *llq_features,
2868                             struct ena_llq_configurations *llq_default_cfg)
2869 {
2870         struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2871         int rc;
2872 
2873         if (!llq_features->max_llq_num) {
2874                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2875                 return 0;
2876         }
2877 
2878         rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2879         if (rc)
2880                 return rc;
2881 
2882         ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2883                 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2884 
2885         if (unlikely(ena_dev->tx_max_header_size == 0)) {
2886                 pr_err("the size of the LLQ entry is smaller than needed\n");
2887                 return -EINVAL;
2888         }
2889 
2890         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2891 
2892         return 0;
2893 }
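/* Editor's note: illustrative arithmetic, not part of ena_com.c. With the
 * formula used in ena_com_config_dev_mode() above, a negotiated LLQ entry of
 * 128 bytes that places 2 descriptors before the packet header leaves
 * 128 - 2 * sizeof(struct ena_eth_io_tx_desc) bytes of header room; assuming
 * the usual 16-byte TX descriptor, tx_max_header_size comes out to 96 bytes.
 * These numbers are examples only; the real values come from the negotiated
 * llq_info.
 */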
