root/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c


DEFINITIONS

This source file includes the following definitions.
  1. hclgevf_ring_space
  2. hclgevf_is_valid_csq_clean_head
  3. hclgevf_cmd_csq_clean
  4. hclgevf_cmd_csq_done
  5. hclgevf_is_special_opcode
  6. hclgevf_cmd_config_regs
  7. hclgevf_cmd_init_regs
  8. hclgevf_alloc_cmd_desc
  9. hclgevf_free_cmd_desc
  10. hclgevf_alloc_cmd_queue
  11. hclgevf_cmd_setup_basic_desc
  12. hclgevf_cmd_convert_err_code
  13. hclgevf_cmd_send
  14. hclgevf_cmd_query_firmware_version
  15. hclgevf_cmd_queue_init
  16. hclgevf_cmd_init
  17. hclgevf_cmd_uninit_regs
  18. hclgevf_cmd_uninit

// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
                                        DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

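/* Return the number of free descriptors in the ring; one slot is kept
 * unused so a full ring can be told apart from an empty one.
 */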
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
        int used;

        used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

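/* Check whether the head value reported by hardware lies within the range
 * of descriptors posted by software, taking ring wrap-around into account.
 */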
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
                                           int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

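/* Reclaim CSQ descriptors that the firmware has already processed; returns
 * the number of descriptors cleaned, or -EIO if the hardware head is invalid.
 */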
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
        struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
        int clean;
        u32 head;

        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
        rmb(); /* Make sure head is ready before touching any data */

        if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
                dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
                         csq->next_to_use, csq->next_to_clean);
                dev_warn(&hdev->pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
                set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

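/* The CSQ is done when the hardware head has caught up with the software
 * next_to_use pointer.
 */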
static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
        u32 head;

        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

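/* For these special opcodes only the first descriptor carries a valid
 * return value (see hclgevf_cmd_send()).
 */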
static bool hclgevf_is_special_opcode(u16 opcode)
{
        static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
        int i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
                if (spec_opcode[i] == opcode)
                        return true;
        }

        return false;
}

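/* Program the base address, depth and head/tail registers for a CSQ or
 * CRQ ring.
 */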
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
        struct hclgevf_dev *hdev = ring->dev;
        struct hclgevf_hw *hw = &hdev->hw;
        u32 reg_val;

        if (ring->flag == HCLGEVF_TYPE_CSQ) {
                reg_val = (u32)ring->desc_dma_addr;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
                reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

                reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
                reg_val &= HCLGEVF_NIC_SW_RST_RDY;
                reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        } else {
                reg_val = (u32)ring->desc_dma_addr;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
                reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
        }
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
        hclgevf_cmd_config_regs(&hw->cmq.csq);
        hclgevf_cmd_config_regs(&hw->cmq.crq);
}

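/* Allocate DMA-coherent memory for the ring's descriptors */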
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
                                        &ring->desc_dma_addr, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

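/* Free the ring's DMA-coherent descriptor memory, if allocated */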
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        if (ring->desc) {
                dma_free_coherent(cmq_ring_to_dev(ring), size,
                                  ring->desc, ring->desc_dma_addr);
                ring->desc = NULL;
        }
}

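/* Bind the CSQ or CRQ ring to the device and allocate its descriptors */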
static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
        struct hclgevf_hw *hw = &hdev->hw;
        struct hclgevf_cmq_ring *ring =
                (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->dev = hdev;
        ring->flag = ring_type;

        /* allocate CSQ/CRQ descriptors */
        ret = hclgevf_alloc_cmd_desc(ring);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
                        (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

        return ret;
}

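/* Initialize a command descriptor with the given opcode and set the
 * direction flag for read or write commands.
 */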
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
                                  enum hclgevf_opcode_type opcode, bool is_read)
{
        memset(desc, 0, sizeof(struct hclgevf_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
                                 HCLGEVF_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

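/* Map firmware command return codes to standard errno values */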
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
        switch (desc_ret) {
        case HCLGEVF_CMD_EXEC_SUCCESS:
                return 0;
        case HCLGEVF_CMD_NO_AUTH:
                return -EPERM;
        case HCLGEVF_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case HCLGEVF_CMD_QUEUE_FULL:
                return -EXFULL;
        case HCLGEVF_CMD_NEXT_ERR:
                return -ENOSR;
        case HCLGEVF_CMD_UNEXE_ERR:
                return -ENOTBLK;
        case HCLGEVF_CMD_PARA_ERR:
                return -EINVAL;
        case HCLGEVF_CMD_RESULT_ERR:
                return -ERANGE;
        case HCLGEVF_CMD_TIMEOUT:
                return -ETIME;
        case HCLGEVF_CMD_HILINK_ERR:
                return -ENOLINK;
        case HCLGEVF_CMD_QUEUE_ILLEGAL:
                return -ENXIO;
        case HCLGEVF_CMD_INVALID:
                return -EBADR;
        default:
                return -EIO;
        }
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it copies the
 * descriptors into the CSQ, rings the doorbell, waits for completion of
 * synchronous commands and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
        struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
        struct hclgevf_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        int status = 0;
        u16 retval;
        u16 opcode;
        int ntc;

        spin_lock_bh(&hw->cmq.csq.lock);

        if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        if (num > hclgevf_ring_space(&hw->cmq.csq)) {
                /* If the CMDQ ring is full, the SW HEAD and HW HEAD may
                 * differ, so the SW HEAD pointer csq->next_to_clean needs
                 * to be updated.
                 */
                csq->next_to_clean = hclgevf_read_dev(hw,
                                                      HCLGEVF_NIC_CSQ_HEAD_REG);
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        /* Record the location of the descriptors in the ring for this
         * command, which hardware will use for write back.
         */
        ntc = hw->cmq.csq.next_to_use;
        opcode = le16_to_cpu(desc[0].opcode);
        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
                          hw->cmq.csq.next_to_use);

        /* If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are to be sent, use the first one to check.
         */
        if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
                do {
                        if (hclgevf_cmd_csq_done(hw))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < hw->cmq.tx_timeout);
        }

        if (hclgevf_cmd_csq_done(hw)) {
                complete = true;
                handle = 0;

                while (handle < num) {
                        /* Get the result of hardware write back */
                        desc_to_use = &hw->cmq.csq.desc[ntc];
                        desc[handle] = *desc_to_use;

                        if (likely(!hclgevf_is_special_opcode(opcode)))
                                retval = le16_to_cpu(desc[handle].retval);
                        else
                                retval = le16_to_cpu(desc[0].retval);

                        status = hclgevf_cmd_convert_err_code(retval);
                        hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
                        ntc++;
                        handle++;
                        if (ntc == hw->cmq.csq.desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                status = -EBADE;

        /* Clean the command send queue */
        handle = hclgevf_cmd_csq_clean(hw);
        if (handle != num)
                dev_warn(&hdev->pdev->dev,
                         "cleaned %d, need to clean %d\n", handle, num);

        spin_unlock_bh(&hw->cmq.csq.lock);

        return status;
}

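/* Query the running firmware version over the command queue */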
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
                                              u32 *version)
{
        struct hclgevf_query_version_cmd *resp;
        struct hclgevf_desc desc;
        int status;

        resp = (struct hclgevf_query_version_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
        status = hclgevf_cmd_send(hw, &desc, 1);
        if (!status)
                *version = le32_to_cpu(resp->firmware);

        return status;
}

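/* Initialize the command queue locks, timeout and descriptor rings */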
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
        int ret;

        /* Set up the locks for the command queues */
        spin_lock_init(&hdev->hw.cmq.csq.lock);
        spin_lock_init(&hdev->hw.cmq.crq.lock);

        hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
        hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
        hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CSQ ring setup error %d\n", ret);
                return ret;
        }

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CRQ ring setup error %d\n", ret);
                goto err_csq;
        }

        return 0;
err_csq:
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        return ret;
}

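/* Reset the ring pointers, program the command queue registers and query
 * the firmware version.
 */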
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
        u32 version;
        int ret;

        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);

        /* initialize the pointers of the mailbox's async RX queue */
        hdev->arq.hdev = hdev;
        hdev->arq.head = 0;
        hdev->arq.tail = 0;
        atomic_set(&hdev->arq.count, 0);
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
        hdev->hw.cmq.crq.next_to_use = 0;

        hclgevf_cmd_init_regs(&hdev->hw);

        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);

        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

        /* Check if there is a new reset pending, because a higher-level
         * reset may happen while a lower-level reset is being processed.
         */
        if (hclgevf_is_reset_pending(hdev)) {
                ret = -EBUSY;
                goto err_cmd_init;
        }

        /* get firmware version */
        ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to query firmware version\n", ret);
                goto err_cmd_init;
        }
        hdev->fw_version = version;

        dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
                                 HNAE3_FW_VERSION_BYTE0_SHIFT));

        return 0;

err_cmd_init:
        set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

        return ret;
}

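/* Clear all CSQ/CRQ registers */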
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

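/* Tear down the command queue: clear the registers and free the
 * descriptor memory.
 */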
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
        hclgevf_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}
