root/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c


DEFINITIONS

This source file includes the following definitions.
  1. is_nixlf_attached
  2. rvu_get_nixlf_count
  3. nix_mce_list_init
  4. nix_alloc_mce_list
  5. get_nix_hw
  6. nix_rx_sync
  7. is_valid_txschq
  8. nix_interface_init
  9. nix_interface_deinit
  10. nix_setup_lso_tso_l3
  11. nix_setup_lso_tso_l4
  12. nix_setup_lso
  13. nix_ctx_free
  14. nixlf_rss_ctx_init
  15. nix_aq_enqueue_wait
  16. rvu_nix_aq_enq_inst
  17. nix_lf_hwctx_disable
  18. rvu_mbox_handler_nix_aq_enq
  19. rvu_mbox_handler_nix_hwctx_disable
  20. rvu_mbox_handler_nix_lf_alloc
  21. rvu_mbox_handler_nix_lf_free
  22. rvu_mbox_handler_nix_mark_format_cfg
  23. nix_reset_tx_shaping
  24. nix_reset_tx_linkcfg
  25. rvu_get_tl1_schqs
  26. rvu_mbox_handler_nix_txsch_alloc
  27. nix_txschq_free
  28. nix_txschq_free_one
  29. rvu_mbox_handler_nix_txsch_free
  30. is_txschq_config_valid
  31. nix_tl1_default_cfg
  32. rvu_mbox_handler_nix_txschq_cfg
  33. nix_rx_vtag_cfg
  34. rvu_mbox_handler_nix_vtag_cfg
  35. nix_setup_mce
  36. nix_update_mce_list
  37. nix_update_bcast_mce_list
  38. nix_setup_bcast_tables
  39. nix_setup_mcast
  40. nix_setup_txschq
  41. rvu_nix_reserve_mark_format
  42. nix_af_mark_format_setup
  43. rvu_mbox_handler_nix_stats_rst
  44. get_flowkey_alg_idx
  45. set_flowkey_fields
  46. reserve_flowkey_alg_idx
  47. rvu_mbox_handler_nix_rss_flowkey_cfg
  48. nix_rx_flowkey_alg_cfg
  49. rvu_mbox_handler_nix_set_mac_addr
  50. rvu_mbox_handler_nix_set_rx_mode
  51. nix_find_link_frs
  52. rvu_mbox_handler_nix_set_hw_frs
  53. rvu_mbox_handler_nix_rxvlan_alloc
  54. rvu_mbox_handler_nix_set_rx_cfg
  55. nix_link_config
  56. nix_calibrate_x2p
  57. nix_aq_init
  58. rvu_nix_init
  59. rvu_nix_freemem
  60. nix_get_nixlf
  61. rvu_mbox_handler_nix_lf_start_rx
  62. rvu_mbox_handler_nix_lf_stop_rx
  63. rvu_nix_lf_teardown
  64. rvu_mbox_handler_nix_lso_format_cfg

// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"

static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);

enum mc_tbl_sz {
        MC_TBL_SZ_256,
        MC_TBL_SZ_512,
        MC_TBL_SZ_1K,
        MC_TBL_SZ_2K,
        MC_TBL_SZ_4K,
        MC_TBL_SZ_8K,
        MC_TBL_SZ_16K,
        MC_TBL_SZ_32K,
        MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
        MC_BUF_CNT_8,
        MC_BUF_CNT_16,
        MC_BUF_CNT_32,
        MC_BUF_CNT_64,
        MC_BUF_CNT_128,
        MC_BUF_CNT_256,
        MC_BUF_CNT_512,
        MC_BUF_CNT_1024,
        MC_BUF_CNT_2048,
};
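
/* Note: these enum encodings are consumed by hardware. The AQ code below
 * decodes the MCE table length as 256 << (NIX_AF_RX_MCAST_CFG & 0xF), so
 * MC_TBL_SZ_256 == 0 selects 256 entries, MC_TBL_SZ_512 selects 512, and
 * so on; MC_BUF_CNT presumably follows a similar power-of-two encoding
 * (8 << n), though only the table-size decode is visible in this file.
 */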

enum nix_mark_fmt_indexes {
        NIX_MARK_CFG_IP_DSCP_RED,
        NIX_MARK_CFG_IP_DSCP_YELLOW,
        NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
        NIX_MARK_CFG_IP_ECN_RED,
        NIX_MARK_CFG_IP_ECN_YELLOW,
        NIX_MARK_CFG_IP_ECN_YELLOW_RED,
        NIX_MARK_CFG_VLAN_DEI_RED,
        NIX_MARK_CFG_VLAN_DEI_YELLOW,
        NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
        NIX_MARK_CFG_MAX,
};

/* For now only the MC resources needed for broadcast pkt replication
 * are considered, i.e. 256 HWVFs + 12 PFs = 268 MCE entries; 512 is
 * the smallest table size that fits them.
 */
#define MC_TBL_SIZE     MC_TBL_SZ_512
#define MC_BUF_CNT      MC_BUF_CNT_128

struct mce {
        struct hlist_node       node;
        u16                     idx;
        u16                     pcifunc;
};

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return false;
        return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
        struct rvu_block *block;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
        if (blkaddr < 0)
                return 0;
        block = &rvu->hw->block[blkaddr];
        return block->lf.max;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
        INIT_HLIST_HEAD(&list->head);
        list->count = 0;
        list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
        int idx;

        if (!mcast)
                return 0;

        idx = mcast->next_free_mce;
        mcast->next_free_mce += count;
        return idx;
}

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
        if (blkaddr == BLKADDR_NIX0 && hw->nix0)
                return hw->nix0;

        return NULL;
}

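/* Trigger an RX software sync: set SW_SYNC[ENA] and poll until the
 * hardware clears it, which indicates that all in-flight RX packets
 * have been written back to LLC/DRAM.
 */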
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
        int err;

        /* Sync all in-flight RX packets to LLC/DRAM */
        rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
        if (err)
                dev_err(rvu->dev, "NIX RX software sync failed\n");

        /* As per a HW erratum in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
         * bit too early. Hence wait for 50us more.
         */
        if (is_rvu_9xxx_A0(rvu))
                usleep_range(50, 60);
}

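/* Scheduler queue ownership is tracked in txsch->pfvf_map[]; each entry
 * packs the owning PF_FUNC (extracted with TXSCH_MAP_FUNC()) together
 * with flag bits, and is updated under rvu->rsrc_lock.
 */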
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
                            int lvl, u16 pcifunc, u16 schq)
{
        struct nix_txsch *txsch;
        struct nix_hw *nix_hw;
        u16 map_func;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return false;

        txsch = &nix_hw->txsch[lvl];
        /* Check out of bounds */
        if (schq >= txsch->schq.max)
                return false;

        mutex_lock(&rvu->rsrc_lock);
        map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
        mutex_unlock(&rvu->rsrc_lock);

        /* For TL1 schq, sharing across VFs of same PF is ok */
        if (lvl == NIX_TXSCH_LVL_TL1 &&
            rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
                return false;

        if (lvl != NIX_TXSCH_LVL_TL1 &&
            map_func != pcifunc)
                return false;

        return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        u8 cgx_id, lmac_id;
        int pkind, pf, vf;
        int err;

        pf = rvu_get_pf(pcifunc);
        if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
                return 0;

        switch (type) {
        case NIX_INTF_TYPE_CGX:
                pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
                rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

                pkind = rvu_npc_get_pkind(rvu, pf);
                if (pkind < 0) {
                        dev_err(rvu->dev,
                                "PF_Func 0x%x: Invalid pkind\n", pcifunc);
                        return -EINVAL;
                }
                pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
                pfvf->tx_chan_base = pfvf->rx_chan_base;
                pfvf->rx_chan_cnt = 1;
                pfvf->tx_chan_cnt = 1;
                cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
                rvu_npc_set_pkind(rvu, pkind, pfvf);
                break;
        case NIX_INTF_TYPE_LBK:
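                /* Loopback VFs are paired: an even-numbered VF transmits
                 * into its odd sibling's RX channel and vice versa, so
                 * packets sent by one VF of the pair are received by the
                 * other.
                 */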
                vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
                pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
                pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
                                                NIX_CHAN_LBK_CHX(0, vf + 1);
                pfvf->rx_chan_cnt = 1;
                pfvf->tx_chan_cnt = 1;
                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
                                              pfvf->rx_chan_base, false);
                break;
        }

        /* Add a UCAST forwarding rule in MCAM matching this RVU PF/VF's
         * MAC address, steering such packets to this NIXLF.
         */
        rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
                                    pfvf->rx_chan_base, pfvf->mac_addr);

        /* Add this PF_FUNC to bcast pkt replication list */
        err = nix_update_bcast_mce_list(rvu, pcifunc, true);
        if (err) {
                dev_err(rvu->dev,
                        "Bcast list, failed to enable PF_FUNC 0x%x\n",
                        pcifunc);
                return err;
        }

        rvu_npc_install_bcast_match_entry(rvu, pcifunc,
                                          nixlf, pfvf->rx_chan_base);
        pfvf->maxlen = NIC_HW_MIN_FRS;
        pfvf->minlen = NIC_HW_MIN_FRS;

        return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int err;

        pfvf->maxlen = 0;
        pfvf->minlen = 0;
        pfvf->rxvlan = false;

        /* Remove this PF_FUNC from bcast pkt replication list */
        err = nix_update_bcast_mce_list(rvu, pcifunc, false);
        if (err) {
                dev_err(rvu->dev,
                        "Bcast list, failed to disable PF_FUNC 0x%x\n",
                        pcifunc);
        }

        /* Free and disable any MCAM entries used by this NIX LF */
        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
                                 u64 format, bool v4, u64 *fidx)
{
        struct nix_lso_format field = {0};

        /* IP's Length field */
        field.layer = NIX_TXLAYER_OL3;
        /* In IPv4 the total-length field is at byte offset 2; in IPv6
         * the payload-length field is at offset 4.
         */
        field.offset = v4 ? 2 : 4;
        field.sizem1 = 1; /* i.e. 2 bytes */
        field.alg = NIX_LSOALG_ADD_PAYLEN;
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
                    *(u64 *)&field);

        /* No ID field in IPv6 header */
        if (!v4)
                return;

        /* IP's ID field */
        field.layer = NIX_TXLAYER_OL3;
        field.offset = 4;
        field.sizem1 = 1; /* i.e. 2 bytes */
        field.alg = NIX_LSOALG_ADD_SEGNUM;
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
                    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
                                 u64 format, u64 *fidx)
{
        struct nix_lso_format field = {0};

        /* TCP's sequence number field */
        field.layer = NIX_TXLAYER_OL4;
        field.offset = 4;
        field.sizem1 = 3; /* i.e 4 bytes */
        field.alg = NIX_LSOALG_ADD_OFFSET;
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
                    *(u64 *)&field);

        /* TCP's flags field */
        field.layer = NIX_TXLAYER_OL4;
        field.offset = 12;
        field.sizem1 = 1; /* 2 bytes */
        field.alg = NIX_LSOALG_TCP_FLAGS;
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
                    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
        u64 cfg, idx, fidx = 0;

        /* Get max HW supported format indices */
        cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
        nix_hw->lso.total = cfg;

        /* Enable LSO */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
        /* For TSO, set first and middle segment flags to
         * mask out PSH, RST & FIN flags in TCP packet
         */
        cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
        cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
        rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

        /* Setup default static LSO formats
         *
         * Configure format fields for TCPv4 segmentation offload
         */
        idx = NIX_LSO_FORMAT_IDX_TSOV4;
        nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
        nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

        /* Set rest of the fields to NOP */
        for (; fidx < 8; fidx++) {
                rvu_write64(rvu, blkaddr,
                            NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
        }
        nix_hw->lso.in_use++;

        /* Configure format fields for TCPv6 segmentation offload */
        idx = NIX_LSO_FORMAT_IDX_TSOV6;
        fidx = 0;
        nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
        nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

        /* Set rest of the fields to NOP */
        for (; fidx < 8; fidx++) {
                rvu_write64(rvu, blkaddr,
                            NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
        }
        nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
        kfree(pfvf->rq_bmap);
        kfree(pfvf->sq_bmap);
        kfree(pfvf->cq_bmap);
        if (pfvf->rq_ctx)
                qmem_free(rvu->dev, pfvf->rq_ctx);
        if (pfvf->sq_ctx)
                qmem_free(rvu->dev, pfvf->sq_ctx);
        if (pfvf->cq_ctx)
                qmem_free(rvu->dev, pfvf->cq_ctx);
        if (pfvf->rss_ctx)
                qmem_free(rvu->dev, pfvf->rss_ctx);
        if (pfvf->nix_qints_ctx)
                qmem_free(rvu->dev, pfvf->nix_qints_ctx);
        if (pfvf->cq_ints_ctx)
                qmem_free(rvu->dev, pfvf->cq_ints_ctx);

        pfvf->rq_bmap = NULL;
        pfvf->cq_bmap = NULL;
        pfvf->sq_bmap = NULL;
        pfvf->rq_ctx = NULL;
        pfvf->sq_ctx = NULL;
        pfvf->cq_ctx = NULL;
        pfvf->rss_ctx = NULL;
        pfvf->nix_qints_ctx = NULL;
        pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
                              struct rvu_pfvf *pfvf, int nixlf,
                              int rss_sz, int rss_grps, int hwctx_size)
{
        int err, grp, num_indices;

        /* RSS is not requested for this NIXLF */
        if (!rss_sz)
                return 0;
        num_indices = rss_sz * rss_grps;

        /* Alloc NIX RSS HW context memory and config the base */
        err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
        if (err)
                return err;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
                    (u64)pfvf->rss_ctx->iova);

        /* Config full RSS table size, enable RSS and caching */
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
                    BIT_ULL(36) | BIT_ULL(4) |
                    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
        /* Config RSS group offset and sizes */
        for (grp = 0; grp < rss_grps; grp++)
                rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
                            ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
        return 0;
}

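/* Append one instruction at the current head of the AQ instruction ring
 * (head is read from NIX_AF_AQ_STATUS), ring the doorbell to hand it to
 * hardware, then busy-poll the result's completion code for up to ~1ms
 * before giving up.
 */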
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                               struct nix_aq_inst_s *inst)
{
        struct admin_queue *aq = block->aq;
        struct nix_aq_res_s *result;
        int timeout = 1000;
        u64 reg, head;

        result = (struct nix_aq_res_s *)aq->res->base;

        /* Get the current head pointer, where this instruction is appended */
        reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
        head = (reg >> 4) & AQ_PTR_MASK;

        memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
               (void *)inst, aq->inst->entry_sz);
        memset(result, 0, sizeof(*result));
        /* sync into memory */
        wmb();

        /* Ring the doorbell and wait for result */
        rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
        while (result->compcode == NIX_AQ_COMP_NOTDONE) {
                cpu_relax();
                udelay(1);
                timeout--;
                if (!timeout)
                        return -EBUSY;
        }

        if (result->compcode != NIX_AQ_COMP_GOOD)
                /* TODO: Replace this with some error code */
                return -EBUSY;

        return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
                               struct nix_aq_enq_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        int nixlf, blkaddr, rc = 0;
        struct nix_aq_inst_s inst;
        struct rvu_block *block;
        struct admin_queue *aq;
        struct rvu_pfvf *pfvf;
        void *ctx, *mask;
        bool ena;
        u64 cfg;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        aq = block->aq;
        if (!aq) {
                dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
                return NIX_AF_ERR_AQ_ENQUEUE;
        }

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

        /* Skip NIXLF check for broadcast MCE entry init */
        if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
                if (!pfvf->nixlf || nixlf < 0)
                        return NIX_AF_ERR_AF_LF_INVALID;
        }

        switch (req->ctype) {
        case NIX_AQ_CTYPE_RQ:
                /* Check if index exceeds max no of queues */
                if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
                        rc = NIX_AF_ERR_AQ_ENQUEUE;
                break;
        case NIX_AQ_CTYPE_SQ:
                if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
                        rc = NIX_AF_ERR_AQ_ENQUEUE;
                break;
        case NIX_AQ_CTYPE_CQ:
                if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
                        rc = NIX_AF_ERR_AQ_ENQUEUE;
                break;
        case NIX_AQ_CTYPE_RSS:
                /* Check if RSS is enabled and qidx is within range */
                cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
                if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
                    (req->qidx >= (256UL << (cfg & 0xF))))
                        rc = NIX_AF_ERR_AQ_ENQUEUE;
                break;
        case NIX_AQ_CTYPE_MCE:
                cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
                /* Check if index exceeds MCE list length */
                if (!hw->nix0->mcast.mce_ctx ||
                    (req->qidx >= (256UL << (cfg & 0xF))))
                        rc = NIX_AF_ERR_AQ_ENQUEUE;

                /* Adding multicast lists for requests from PF/VFs is not
                 * yet supported, so reject such requests.
                 */
                if (rsp)
                        rc = NIX_AF_ERR_AQ_ENQUEUE;
                break;
        default:
                rc = NIX_AF_ERR_AQ_ENQUEUE;
        }

        if (rc)
                return rc;

        /* Check if SQ pointed SMQ belongs to this PF/VF or not */
        if (req->ctype == NIX_AQ_CTYPE_SQ &&
            ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
             (req->op == NIX_AQ_INSTOP_WRITE &&
              req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
                if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
                                     pcifunc, req->sq.smq))
                        return NIX_AF_ERR_AQ_ENQUEUE;
        }

        memset(&inst, 0, sizeof(struct nix_aq_inst_s));
        inst.lf = nixlf;
        inst.cindex = req->qidx;
        inst.ctype = req->ctype;
        inst.op = req->op;
        /* Currently we are not supporting enqueuing multiple instructions,
         * so always choose first entry in result memory.
         */
        inst.res_addr = (u64)aq->res->iova;

        /* Clean result + context memory */
        memset(aq->res->base, 0, aq->res->entry_sz);
        /* Context needs to be written at RES_ADDR + 128 */
        ctx = aq->res->base + 128;
        /* Mask needs to be written at RES_ADDR + 256 */
        mask = aq->res->base + 256;

        switch (req->op) {
        case NIX_AQ_INSTOP_WRITE:
                if (req->ctype == NIX_AQ_CTYPE_RQ)
                        memcpy(mask, &req->rq_mask,
                               sizeof(struct nix_rq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_SQ)
                        memcpy(mask, &req->sq_mask,
                               sizeof(struct nix_sq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_CQ)
                        memcpy(mask, &req->cq_mask,
                               sizeof(struct nix_cq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_RSS)
                        memcpy(mask, &req->rss_mask,
                               sizeof(struct nix_rsse_s));
                else if (req->ctype == NIX_AQ_CTYPE_MCE)
                        memcpy(mask, &req->mce_mask,
                               sizeof(struct nix_rx_mce_s));
                /* Fall through */
        case NIX_AQ_INSTOP_INIT:
                if (req->ctype == NIX_AQ_CTYPE_RQ)
                        memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_SQ)
                        memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_CQ)
                        memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
                else if (req->ctype == NIX_AQ_CTYPE_RSS)
                        memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
                else if (req->ctype == NIX_AQ_CTYPE_MCE)
                        memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
                break;
        case NIX_AQ_INSTOP_NOP:
        case NIX_AQ_INSTOP_READ:
        case NIX_AQ_INSTOP_LOCK:
        case NIX_AQ_INSTOP_UNLOCK:
                break;
        default:
                rc = NIX_AF_ERR_AQ_ENQUEUE;
                return rc;
        }

        spin_lock(&aq->lock);

        /* Submit the instruction to AQ */
        rc = nix_aq_enqueue_wait(rvu, block, &inst);
        if (rc) {
                spin_unlock(&aq->lock);
                return rc;
        }

        /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
        if (req->op == NIX_AQ_INSTOP_INIT) {
                if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
                        __set_bit(req->qidx, pfvf->rq_bmap);
                if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
                        __set_bit(req->qidx, pfvf->sq_bmap);
                if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
                        __set_bit(req->qidx, pfvf->cq_bmap);
        }

        if (req->op == NIX_AQ_INSTOP_WRITE) {
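                /* A WRITE updates a context field only where the
                 * corresponding mask bit is set, so the resulting ENA
                 * state is the written value where masked, and the
                 * previously cached bitmap state elsewhere.
                 */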
                if (req->ctype == NIX_AQ_CTYPE_RQ) {
                        ena = (req->rq.ena & req->rq_mask.ena) |
                                (test_bit(req->qidx, pfvf->rq_bmap) &
                                ~req->rq_mask.ena);
                        if (ena)
                                __set_bit(req->qidx, pfvf->rq_bmap);
                        else
                                __clear_bit(req->qidx, pfvf->rq_bmap);
                }
                if (req->ctype == NIX_AQ_CTYPE_SQ) {
                        ena = (req->sq.ena & req->sq_mask.ena) |
                                (test_bit(req->qidx, pfvf->sq_bmap) &
                                ~req->sq_mask.ena);
                        if (ena)
                                __set_bit(req->qidx, pfvf->sq_bmap);
                        else
                                __clear_bit(req->qidx, pfvf->sq_bmap);
                }
                if (req->ctype == NIX_AQ_CTYPE_CQ) {
                        ena = (req->cq.ena & req->cq_mask.ena) |
                                (test_bit(req->qidx, pfvf->cq_bmap) &
                                ~req->cq_mask.ena);
                        if (ena)
                                __set_bit(req->qidx, pfvf->cq_bmap);
                        else
                                __clear_bit(req->qidx, pfvf->cq_bmap);
                }
        }

        if (rsp) {
                /* Copy read context into mailbox */
                if (req->op == NIX_AQ_INSTOP_READ) {
                        if (req->ctype == NIX_AQ_CTYPE_RQ)
                                memcpy(&rsp->rq, ctx,
                                       sizeof(struct nix_rq_ctx_s));
                        else if (req->ctype == NIX_AQ_CTYPE_SQ)
                                memcpy(&rsp->sq, ctx,
                                       sizeof(struct nix_sq_ctx_s));
                        else if (req->ctype == NIX_AQ_CTYPE_CQ)
                                memcpy(&rsp->cq, ctx,
                                       sizeof(struct nix_cq_ctx_s));
                        else if (req->ctype == NIX_AQ_CTYPE_RSS)
                                memcpy(&rsp->rss, ctx,
                                       sizeof(struct nix_rsse_s));
                        else if (req->ctype == NIX_AQ_CTYPE_MCE)
                                memcpy(&rsp->mce, ctx,
                                       sizeof(struct nix_rx_mce_s));
                }
        }

        spin_unlock(&aq->lock);
        return 0;
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
        struct nix_aq_enq_req aq_req;
        unsigned long *bmap;
        int qidx, q_cnt = 0;
        int err = 0, rc;

        if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
                return NIX_AF_ERR_AQ_ENQUEUE;

        memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
        aq_req.hdr.pcifunc = req->hdr.pcifunc;

        if (req->ctype == NIX_AQ_CTYPE_CQ) {
                aq_req.cq.ena = 0;
                aq_req.cq_mask.ena = 1;
                q_cnt = pfvf->cq_ctx->qsize;
                bmap = pfvf->cq_bmap;
        }
        if (req->ctype == NIX_AQ_CTYPE_SQ) {
                aq_req.sq.ena = 0;
                aq_req.sq_mask.ena = 1;
                q_cnt = pfvf->sq_ctx->qsize;
                bmap = pfvf->sq_bmap;
        }
        if (req->ctype == NIX_AQ_CTYPE_RQ) {
                aq_req.rq.ena = 0;
                aq_req.rq_mask.ena = 1;
                q_cnt = pfvf->rq_ctx->qsize;
                bmap = pfvf->rq_bmap;
        }

        aq_req.ctype = req->ctype;
        aq_req.op = NIX_AQ_INSTOP_WRITE;

        for (qidx = 0; qidx < q_cnt; qidx++) {
                if (!test_bit(qidx, bmap))
                        continue;
                aq_req.qidx = qidx;
                rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
                if (rc) {
                        err = rc;
                        dev_err(rvu->dev, "Failed to disable %s:%d context\n",
                                (req->ctype == NIX_AQ_CTYPE_CQ) ?
                                "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
                                "RQ" : "SQ"), qidx);
                }
        }

        return err;
}

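/* Mailbox entry point for PF/VF context updates via the AQ. As a sketch
 * of the masked-write semantics (mirroring what nix_lf_hwctx_disable()
 * does internally), disabling a single RQ would look like:
 *
 *	struct nix_aq_enq_req req = { 0 };
 *
 *	req.hdr.pcifunc = pcifunc;
 *	req.ctype = NIX_AQ_CTYPE_RQ;
 *	req.op = NIX_AQ_INSTOP_WRITE;
 *	req.qidx = qidx;
 *	req.rq.ena = 0;
 *	req.rq_mask.ena = 1;	(only the ENA bit is written)
 */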
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
                                struct nix_aq_enq_req *req,
                                struct nix_aq_enq_rsp *rsp)
{
        return rvu_nix_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
                                       struct msg_rsp *rsp)
{
        return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
                                  struct nix_lf_alloc_req *req,
                                  struct nix_lf_alloc_rsp *rsp)
{
        int nixlf, qints, hwctx_size, intf, err, rc = 0;
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        u64 cfg, ctx_cfg;
        int blkaddr;

        if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
                return NIX_AF_ERR_PARAM;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (nixlf < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
        if (req->npa_func) {
                /* If default, use 'this' NIXLF's PFFUNC */
                if (req->npa_func == RVU_DEFAULT_PF_FUNC)
                        req->npa_func = pcifunc;
                if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
                        return NIX_AF_INVAL_NPA_PF_FUNC;
        }

        /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
        if (req->sso_func) {
                /* If default, use 'this' NIXLF's PFFUNC */
                if (req->sso_func == RVU_DEFAULT_PF_FUNC)
                        req->sso_func = pcifunc;
                if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
                        return NIX_AF_INVAL_SSO_PF_FUNC;
        }

        /* If RSS is being enabled, check if requested config is valid.
         * RSS table size should be power of two, otherwise
         * RSS_GRP::OFFSET + adder might go beyond that group or
         * won't be able to use entire table.
         */
        if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
                            !is_power_of_2(req->rss_sz)))
                return NIX_AF_ERR_RSS_SIZE_INVALID;

        if (req->rss_sz &&
            (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
                return NIX_AF_ERR_RSS_GRPS_INVALID;

        /* Reset this NIX LF */
        err = rvu_lf_reset(rvu, block, nixlf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
                        block->addr - BLKADDR_NIX0, nixlf);
                return NIX_AF_ERR_LF_RESET;
        }

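        /* NIX_AF_CONST3 packs each HW context entry size as a log2 value
         * in 4-bit fields; the decodes below use SQ at bits [3:0], RQ at
         * [7:4], CQ at [11:8], RSS at [15:12], QINT at [23:20] and CINT
         * at [27:24].
         */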
        ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

        /* Alloc NIX RQ HW context memory and config the base */
        hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
        if (err)
                goto free_mem;

        pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
        if (!pfvf->rq_bmap)
                goto free_mem;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
                    (u64)pfvf->rq_ctx->iova);

        /* Set caching and queue count in HW */
        cfg = BIT_ULL(36) | (req->rq_cnt - 1);
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

        /* Alloc NIX SQ HW context memory and config the base */
        hwctx_size = 1UL << (ctx_cfg & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
        if (err)
                goto free_mem;

        pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
        if (!pfvf->sq_bmap)
                goto free_mem;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
                    (u64)pfvf->sq_ctx->iova);
        cfg = BIT_ULL(36) | (req->sq_cnt - 1);
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

        /* Alloc NIX CQ HW context memory and config the base */
        hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
        if (err)
                goto free_mem;

        pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
        if (!pfvf->cq_bmap)
                goto free_mem;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
                    (u64)pfvf->cq_ctx->iova);
        cfg = BIT_ULL(36) | (req->cq_cnt - 1);
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

        /* Initialize receive side scaling (RSS) */
        hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
        err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
                                 req->rss_sz, req->rss_grps, hwctx_size);
        if (err)
                goto free_mem;

        /* Alloc memory for CQINT's HW contexts */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
        qints = (cfg >> 24) & 0xFFF;
        hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
        if (err)
                goto free_mem;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
                    (u64)pfvf->cq_ints_ctx->iova);
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));

        /* Alloc memory for QINT's HW contexts */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
        qints = (cfg >> 12) & 0xFFF;
        hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
        if (err)
                goto free_mem;

        rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
                    (u64)pfvf->nix_qints_ctx->iova);
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));

        /* Setup VLANX TPIDs.
         * Use VLAN1 (TPID 0x8100) for 802.1Q
         * and VLAN0 (TPID 0x88A8) for 802.1AD.
         */
        cfg = (0x8100ULL << 16) | 0x88A8ULL;
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

        /* Enable LMTST for this NIX LF */
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

        /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
         * Start from zero so the stale TPID bits left in 'cfg' are not
         * carried into NIX_AF_LFX_CFG when npa_func is not set.
         */
        cfg = 0;
        if (req->npa_func)
                cfg = req->npa_func;
        if (req->sso_func)
                cfg |= (u64)req->sso_func << 16;

        cfg |= (u64)req->xqe_sz << 33;
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

        /* Config Rx pkt length, csum checks and apad enable/disable */
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

        intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
        err = nix_interface_init(rvu, pcifunc, intf, nixlf);
        if (err)
                goto free_mem;

        /* Disable NPC entries as NIXLF's contexts are not initialized yet */
        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

        goto exit;

free_mem:
        nix_ctx_free(rvu, pfvf);
        rc = -ENOMEM;

exit:
        /* Set macaddr of this PF/VF */
        ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

        /* set SQB size info */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
        rsp->sqb_size = (cfg >> 34) & 0xFFFF;
        rsp->rx_chan_base = pfvf->rx_chan_base;
        rsp->tx_chan_base = pfvf->tx_chan_base;
        rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
        rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
        rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
        rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
        /* Get HW supported stat count */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
        rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
        rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
        /* Get count of CQ IRQs and error IRQs supported per LF */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
        rsp->qints = ((cfg >> 12) & 0xFFF);
        rsp->cints = ((cfg >> 24) & 0xFFF);
        return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
                                 struct msg_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        int blkaddr, nixlf, err;
        struct rvu_pfvf *pfvf;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (nixlf < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_interface_deinit(rvu, pcifunc, nixlf);

        /* Reset this NIX LF */
        err = rvu_lf_reset(rvu, block, nixlf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
                        block->addr - BLKADDR_NIX0, nixlf);
                return NIX_AF_ERR_LF_RESET;
        }

        nix_ctx_free(rvu, pfvf);

        return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
                                         struct nix_mark_format_cfg *req,
                                         struct nix_mark_format_cfg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        struct nix_hw *nix_hw;
        struct rvu_pfvf *pfvf;
        int blkaddr, rc;
        u32 cfg;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -EINVAL;

        cfg = (((u32)req->offset & 0x7) << 16) |
              (((u32)req->y_mask & 0xF) << 12) |
              (((u32)req->y_val & 0xF) << 8) |
              (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

        rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
        if (rc < 0) {
                dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)\n",
                        rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
                return NIX_AF_ERR_MARK_CFG_FAIL;
        }

        rsp->mark_format_idx = rc;
        return 0;
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
                                 int lvl, int schq)
{
        u64  cir_reg = 0, pir_reg = 0;
        u64  cfg;

        switch (lvl) {
        case NIX_TXSCH_LVL_TL1:
                cir_reg = NIX_AF_TL1X_CIR(schq);
                pir_reg = 0; /* PIR not available at TL1 */
                break;
        case NIX_TXSCH_LVL_TL2:
                cir_reg = NIX_AF_TL2X_CIR(schq);
                pir_reg = NIX_AF_TL2X_PIR(schq);
                break;
        case NIX_TXSCH_LVL_TL3:
                cir_reg = NIX_AF_TL3X_CIR(schq);
                pir_reg = NIX_AF_TL3X_PIR(schq);
                break;
        case NIX_TXSCH_LVL_TL4:
                cir_reg = NIX_AF_TL4X_CIR(schq);
                pir_reg = NIX_AF_TL4X_PIR(schq);
                break;
        }

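        /* Bit 0 of the CIR/PIR registers enables the shaper; clear just
         * that bit so any configured rate parameters are preserved.
         */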
        if (!cir_reg)
                return;
        cfg = rvu_read64(rvu, blkaddr, cir_reg);
        rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

        if (!pir_reg)
                return;
        cfg = rvu_read64(rvu, blkaddr, pir_reg);
        rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
                                 int lvl, int schq)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int link;

        /* Reset TL4's SDP link config */
        if (lvl == NIX_TXSCH_LVL_TL4)
                rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

        if (lvl != NIX_TXSCH_LVL_TL2)
                return;

        /* Reset TL2's CGX or LBK link config */
        for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
                rvu_write64(rvu, blkaddr,
                            NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static int
rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
                  u16 *schq_list, u16 *schq_cnt)
{
        struct nix_txsch *txsch;
        struct nix_hw *nix_hw;
        struct rvu_pfvf *pfvf;
        u8 cgx_id, lmac_id;
        u16 schq_base;
        u32 *pfvf_map;
        int pf, intf;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -ENODEV;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
        pfvf_map = txsch->pfvf_map;
        pf = rvu_get_pf(pcifunc);

        /* static allocation as two TL1s per link */
        intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

        switch (intf) {
        case NIX_INTF_TYPE_CGX:
                rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
                schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
                break;
        case NIX_INTF_TYPE_LBK:
                schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
                break;
        default:
                return -ENODEV;
        }

        if (schq_base + 1 > txsch->schq.max)
                return -ENODEV;

        /* pfvf_map entries hold flags too and start out as U32_MAX; on
         * first use claim both TL1s for this PF and reset them once.
         */
        if (pfvf_map[schq_base] == U32_MAX) {
                pfvf_map[schq_base] =
                        TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
                pfvf_map[schq_base + 1] =
                        TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);

                /* Onetime reset for TL1 */
                nix_reset_tx_linkcfg(rvu, blkaddr,
                                     NIX_TXSCH_LVL_TL1, schq_base);
                nix_reset_tx_shaping(rvu, blkaddr,
                                     NIX_TXSCH_LVL_TL1, schq_base);

                nix_reset_tx_linkcfg(rvu, blkaddr,
                                     NIX_TXSCH_LVL_TL1, schq_base + 1);
                nix_reset_tx_shaping(rvu, blkaddr,
                                     NIX_TXSCH_LVL_TL1, schq_base + 1);
        }

        if (schq_list && schq_cnt) {
                schq_list[0] = schq_base;
                schq_list[1] = schq_base + 1;
                *schq_cnt = 2;
        }

        return 0;
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                                     struct nix_txsch_alloc_req *req,
                                     struct nix_txsch_alloc_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        struct nix_txsch *txsch;
        int lvl, idx, req_schq;
        struct rvu_pfvf *pfvf;
        struct nix_hw *nix_hw;
        int blkaddr, rc = 0;
        u32 *pfvf_map;
        u16 schq;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -EINVAL;

        mutex_lock(&rvu->rsrc_lock);
        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                txsch = &nix_hw->txsch[lvl];
                req_schq = req->schq_contig[lvl] + req->schq[lvl];
                pfvf_map = txsch->pfvf_map;

                if (!req_schq)
                        continue;

                /* There are only 28 TL1s */
                if (lvl == NIX_TXSCH_LVL_TL1) {
                        if (req->schq_contig[lvl] ||
                            req->schq[lvl] > 2 ||
                            rvu_get_tl1_schqs(rvu, blkaddr,
                                              pcifunc, NULL, NULL))
                                goto err;
                        continue;
                }

                /* Check if request is valid */
                if (req_schq > MAX_TXSCHQ_PER_FUNC)
                        goto err;

                /* If contiguous queues are needed, check for availability */
                if (req->schq_contig[lvl] &&
                    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
                        goto err;

                /* Check if full request can be accommodated */
                if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
                        goto err;
        }

        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                txsch = &nix_hw->txsch[lvl];
                rsp->schq_contig[lvl] = req->schq_contig[lvl];
                pfvf_map = txsch->pfvf_map;
                rsp->schq[lvl] = req->schq[lvl];

                if (!req->schq[lvl] && !req->schq_contig[lvl])
                        continue;

                /* Handle TL1 specially, as its allocation is
                 * restricted to 2 TL1s per link
                 */

                if (lvl == NIX_TXSCH_LVL_TL1) {
                        rsp->schq_contig[lvl] = 0;
                        rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
                                          &rsp->schq_list[lvl][0],
                                          &rsp->schq[lvl]);
                        continue;
                }

                /* Alloc contiguous queues first */
                if (req->schq_contig[lvl]) {
                        schq = rvu_alloc_rsrc_contig(&txsch->schq,
                                                     req->schq_contig[lvl]);

                        for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                                nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                                nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
                                rsp->schq_contig_list[lvl][idx] = schq;
                                schq++;
                        }
                }

                /* Alloc non-contiguous queues */
                for (idx = 0; idx < req->schq[lvl]; idx++) {
                        schq = rvu_alloc_rsrc(&txsch->schq);
                        pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                        nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
                        rsp->schq_list[lvl][idx] = schq;
                }
        }
        goto exit;
err:
        rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
        mutex_unlock(&rvu->rsrc_lock);
        return rc;
}

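/* Free all scheduler queues owned by 'pcifunc': first detach the TL2/TL4
 * link configs, then flush the SMQs with enqueue stopped, then return
 * every level except the shared TL1 to the free pool, and finally sync
 * this LF's cached NDC-TX state out to LLC/DRAM.
 */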
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
        int blkaddr, nixlf, lvl, schq, err;
        struct rvu_hwinfo *hw = rvu->hw;
        struct nix_txsch *txsch;
        struct nix_hw *nix_hw;
        u64 cfg;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -EINVAL;

        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
        if (nixlf < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        /* Disable TL2/3 queue links before SMQ flush */
        mutex_lock(&rvu->rsrc_lock);
        for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
                        continue;

                txsch = &nix_hw->txsch[lvl];
                for (schq = 0; schq < txsch->schq.max; schq++) {
                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                                continue;
                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                }
        }

        /* Flush SMQs */
        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
        for (schq = 0; schq < txsch->schq.max; schq++) {
                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                        continue;
                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
                /* Do SMQ flush and set enqueue xoff */
                cfg |= BIT_ULL(50) | BIT_ULL(49);
                rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);

                /* Wait for flush to complete */
                err = rvu_poll_reg(rvu, blkaddr,
                                   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
                if (err) {
                        dev_err(rvu->dev,
                                "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
                }
        }

        /* Now free scheduler queues to free pool */
        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                /* Free all SCHQs except TL1, as TL1 is shared across
                 * all VFs of an RVU PF
                 */
                if (lvl == NIX_TXSCH_LVL_TL1)
                        continue;

                txsch = &nix_hw->txsch[lvl];
                for (schq = 0; schq < txsch->schq.max; schq++) {
                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                                continue;
                        rvu_free_rsrc(&txsch->schq, schq);
                        txsch->pfvf_map[schq] = 0;
                }
        }
        mutex_unlock(&rvu->rsrc_lock);

        /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
        rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
        if (err)
                dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

        return 0;
}
1318 
1319 static int nix_txschq_free_one(struct rvu *rvu,
1320                                struct nix_txsch_free_req *req)
1321 {
1322         int lvl, schq, nixlf, blkaddr, rc;
1323         struct rvu_hwinfo *hw = rvu->hw;
1324         u16 pcifunc = req->hdr.pcifunc;
1325         struct nix_txsch *txsch;
1326         struct nix_hw *nix_hw;
1327         u32 *pfvf_map;
1328         u64 cfg;
1329 
1330         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1331         if (blkaddr < 0)
1332                 return NIX_AF_ERR_AF_LF_INVALID;
1333 
1334         nix_hw = get_nix_hw(rvu->hw, blkaddr);
1335         if (!nix_hw)
1336                 return -EINVAL;
1337 
1338         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1339         if (nixlf < 0)
1340                 return NIX_AF_ERR_AF_LF_INVALID;
1341 
1342         lvl = req->schq_lvl;
1343         schq = req->schq;
1344         txsch = &nix_hw->txsch[lvl];
1345 
1346         /* Don't allow freeing TL1 */
1347         if (lvl > NIX_TXSCH_LVL_TL2 ||
1348             schq >= txsch->schq.max)
1349                 goto err;
1350 
1351         pfvf_map = txsch->pfvf_map;
1352         mutex_lock(&rvu->rsrc_lock);
1353 
1354         if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1355                 mutex_unlock(&rvu->rsrc_lock);
1356                 goto err;
1357         }
1358 
1359         /* Flush if it is an SMQ. The onus of disabling
1360          * TL2/TL3 queue links before an SMQ flush is on the user.
1361          */
1362         if (lvl == NIX_TXSCH_LVL_SMQ) {
1363                 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1364                 /* Do SMQ flush and set enqueue xoff */
1365                 cfg |= BIT_ULL(50) | BIT_ULL(49);
1366                 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1367 
1368                 /* Wait for flush to complete */
1369                 rc = rvu_poll_reg(rvu, blkaddr,
1370                                   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1371                 if (rc) {
1372                         dev_err(rvu->dev,
1373                                 "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1374                 }
1375         }
1376 
1377         /* Free the resource */
1378         rvu_free_rsrc(&txsch->schq, schq);
1379         txsch->pfvf_map[schq] = 0;
1380         mutex_unlock(&rvu->rsrc_lock);
1381         return 0;
1382 err:
1383         return NIX_AF_ERR_TLX_INVALID;
1384 }
1385 
1386 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1387                                     struct nix_txsch_free_req *req,
1388                                     struct msg_rsp *rsp)
1389 {
1390         if (req->flags & TXSCHQ_FREE_ALL)
1391                 return nix_txschq_free(rvu, req->hdr.pcifunc);
1392         else
1393                 return nix_txschq_free_one(rvu, req);
1394 }
1395 
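/* Hypothetical caller sketch (not part of the driver) showing the two
 * request shapes the handler above accepts; the field names and
 * TXSCHQ_FREE_ALL are taken from their use in this file.
 */
static void nix_txsch_free_examples(struct rvu *rvu, u16 pcifunc)
{
        struct nix_txsch_free_req req = { };
        struct msg_rsp rsp = { };

        /* Free a single TL2 scheduler queue, index 5 (arbitrary) */
        req.hdr.pcifunc = pcifunc;
        req.schq_lvl = NIX_TXSCH_LVL_TL2;
        req.schq = 5;
        rvu_mbox_handler_nix_txsch_free(rvu, &req, &rsp);

        /* Free everything this PF/VF owns; TL1 is never freed */
        req.flags = TXSCHQ_FREE_ALL;
        rvu_mbox_handler_nix_txsch_free(rvu, &req, &rsp);
}
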
1396 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1397                                    int lvl, u64 reg, u64 regval)
1398 {
1399         u64 regbase = reg & 0xFFFF;
1400         u16 schq, parent;
1401 
1402         if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1403                 return false;
1404 
1405         schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1406         /* Check if this schq belongs to this PF/VF or not */
1407         if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1408                 return false;
1409 
1410         parent = (regval >> 16) & 0x1FF;
1411         /* Validate MDQ's TL4 parent */
1412         if (regbase == NIX_AF_MDQX_PARENT(0) &&
1413             !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1414                 return false;
1415 
1416         /* Validate TL4's TL3 parent */
1417         if (regbase == NIX_AF_TL4X_PARENT(0) &&
1418             !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1419                 return false;
1420 
1421         /* Validate TL3's TL2 parent */
1422         if (regbase == NIX_AF_TL3X_PARENT(0) &&
1423             !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1424                 return false;
1425 
1426         /* Validate TL2's TL1 parent */
1427         if (regbase == NIX_AF_TL2X_PARENT(0) &&
1428             !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1429                 return false;
1430 
1431         return true;
1432 }
1433 
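/* Worked example (hypothetical values) of the check above: a request
 * writing NIX_AF_TL3X_PARENT(7) with regval (3 << 16) asks to parent
 * TL3 queue 7 onto TL2 queue 3. The schq is recovered from the
 * register offset, the parent from regval bits <24:16>, and the write
 * is accepted only if both queues belong to the requesting PF/VF.
 */
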
1434 static int
1435 nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
1436 {
1437         u16 schq_list[2], schq_cnt, schq;
1438         int blkaddr, idx, err = 0;
1439         u16 map_func, map_flags;
1440         struct nix_hw *nix_hw;
1441         u64 reg, regval;
1442         u32 *pfvf_map;
1443 
1444         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1445         if (blkaddr < 0)
1446                 return NIX_AF_ERR_AF_LF_INVALID;
1447 
1448         nix_hw = get_nix_hw(rvu->hw, blkaddr);
1449         if (!nix_hw)
1450                 return -EINVAL;
1451 
1452         pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1453 
1454         mutex_lock(&rvu->rsrc_lock);
1455 
1456         err = rvu_get_tl1_schqs(rvu, blkaddr,
1457                                 pcifunc, schq_list, &schq_cnt);
1458         if (err)
1459                 goto unlock;
1460 
1461         for (idx = 0; idx < schq_cnt; idx++) {
1462                 schq = schq_list[idx];
1463                 map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
1464                 map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
1465 
1466                 /* Skip if default config is already done, e.g. by the PF */
1467                 if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
1468                         continue;
1469 
1470                 /* default configuration */
1471                 reg = NIX_AF_TL1X_TOPOLOGY(schq);
1472                 regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
1473                 rvu_write64(rvu, blkaddr, reg, regval);
1474                 reg = NIX_AF_TL1X_SCHEDULE(schq);
1475                 regval = TXSCH_TL1_DFLT_RR_QTM;
1476                 rvu_write64(rvu, blkaddr, reg, regval);
1477                 reg = NIX_AF_TL1X_CIR(schq);
1478                 regval = 0;
1479                 rvu_write64(rvu, blkaddr, reg, regval);
1480 
1481                 map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
1482                 pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
1483         }
1484 unlock:
1485         mutex_unlock(&rvu->rsrc_lock);
1486         return err;
1487 }
1488 
1489 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1490                                     struct nix_txschq_config *req,
1491                                     struct msg_rsp *rsp)
1492 {
1493         u16 schq, pcifunc = req->hdr.pcifunc;
1494         struct rvu_hwinfo *hw = rvu->hw;
1495         u64 reg, regval, schq_regbase;
1496         struct nix_txsch *txsch;
1497         u16 map_func, map_flags;
1498         struct nix_hw *nix_hw;
1499         int blkaddr, idx, err;
1500         u32 *pfvf_map;
1501         int nixlf;
1502 
1503         if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1504             req->num_regs > MAX_REGS_PER_MBOX_MSG)
1505                 return NIX_AF_INVAL_TXSCHQ_CFG;
1506 
1507         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1508         if (blkaddr < 0)
1509                 return NIX_AF_ERR_AF_LF_INVALID;
1510 
1511         nix_hw = get_nix_hw(rvu->hw, blkaddr);
1512         if (!nix_hw)
1513                 return -EINVAL;
1514 
1515         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1516         if (nixlf < 0)
1517                 return NIX_AF_ERR_AF_LF_INVALID;
1518 
1519         txsch = &nix_hw->txsch[req->lvl];
1520         pfvf_map = txsch->pfvf_map;
1521 
1522         /* A VF is only allowed to trigger
1523          * the default TL1 configuration
1524          */
1525         if (pcifunc & RVU_PFVF_FUNC_MASK &&
1526             req->lvl == NIX_TXSCH_LVL_TL1) {
1527                 return nix_tl1_default_cfg(rvu, pcifunc);
1528         }
1529 
1530         for (idx = 0; idx < req->num_regs; idx++) {
1531                 reg = req->reg[idx];
1532                 regval = req->regval[idx];
1533                 schq_regbase = reg & 0xFFFF;
1534 
1535                 if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1536                                             txsch->lvl, reg, regval))
1537                         return NIX_AF_INVAL_TXSCHQ_CFG;
1538 
1539                 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1540                 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1541                         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1542                                            pcifunc, 0);
1543                         regval &= ~(0x7FULL << 24);
1544                         regval |= ((u64)nixlf << 24);
1545                 }
1546 
1547                 /* Mark config as done for TL1 by PF */
1548                 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1549                     schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1550                         schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1551 
1552                         mutex_lock(&rvu->rsrc_lock);
1553 
1554                         map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
1555                         map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
1556 
1557                         map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
1558                         pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
1559                         mutex_unlock(&rvu->rsrc_lock);
1560                 }
1561 
1562                 rvu_write64(rvu, blkaddr, reg, regval);
1563 
1564                 /* Check for SMQ flush, if so, poll for its completion */
1565                 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1566                     (regval & BIT_ULL(49))) {
1567                         err = rvu_poll_reg(rvu, blkaddr,
1568                                            reg, BIT_ULL(49), true);
1569                         if (err)
1570                                 return NIX_AF_SMQ_FLUSH_FAILED;
1571                 }
1572         }
1573         return 0;
1574 }
1575 
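/* Hypothetical request sketch for the handler above: a PF setting the
 * max frame length of SMQ 0 via one register write. The maxlen
 * position (bits <23:8>) is inferred from its use in
 * rvu_mbox_handler_nix_set_hw_frs() later in this file.
 */
static void nix_txschq_cfg_example(struct nix_txschq_config *req,
                                   u16 pcifunc)
{
        req->hdr.pcifunc = pcifunc;
        req->lvl = NIX_TXSCH_LVL_SMQ;
        req->num_regs = 1;
        req->reg[0] = NIX_AF_SMQX_CFG(0);
        /* maxlen = 1514; the NIXLF slot in bits <30:24> is rewritten
         * by the AF, so any value the caller puts there is ignored.
         */
        req->regval[0] = 1514ULL << 8;
}
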
1576 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1577                            struct nix_vtag_config *req)
1578 {
1579         u64 regval = req->vtag_size;
1580 
1581         if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1582                 return -EINVAL;
1583 
1584         if (req->rx.capture_vtag)
1585                 regval |= BIT_ULL(5);
1586         if (req->rx.strip_vtag)
1587                 regval |= BIT_ULL(4);
1588 
1589         rvu_write64(rvu, blkaddr,
1590                     NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1591         return 0;
1592 }
1593 
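/* Layout of the per-type RX VTAG register written above, as encoded by
 * nix_rx_vtag_cfg(): the low bits hold vtag_size (up to VTAGSIZE_T8),
 * bit 4 enables stripping of the matched tag and bit 5 enables
 * capturing it (presumably into the completion entry).
 */
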
1594 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1595                                   struct nix_vtag_config *req,
1596                                   struct msg_rsp *rsp)
1597 {
1598         struct rvu_hwinfo *hw = rvu->hw;
1599         u16 pcifunc = req->hdr.pcifunc;
1600         int blkaddr, nixlf, err;
1601 
1602         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1603         if (blkaddr < 0)
1604                 return NIX_AF_ERR_AF_LF_INVALID;
1605 
1606         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1607         if (nixlf < 0)
1608                 return NIX_AF_ERR_AF_LF_INVALID;
1609 
1610         if (req->cfg_type) {
1611                 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1612                 if (err)
1613                         return NIX_AF_ERR_PARAM;
1614         } else {
1615                 /* TODO: handle tx vtag configuration */
1616                 return 0;
1617         }
1618 
1619         return 0;
1620 }
1621 
1622 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1623                          u16 pcifunc, int next, bool eol)
1624 {
1625         struct nix_aq_enq_req aq_req;
1626         int err;
1627 
1628         aq_req.hdr.pcifunc = 0;
1629         aq_req.ctype = NIX_AQ_CTYPE_MCE;
1630         aq_req.op = op;
1631         aq_req.qidx = mce;
1632 
1633         /* Forward bcast pkts to RQ0, RSS not needed */
1634         aq_req.mce.op = 0;
1635         aq_req.mce.index = 0;
1636         aq_req.mce.eol = eol;
1637         aq_req.mce.pf_func = pcifunc;
1638         aq_req.mce.next = next;
1639 
1640         /* All fields valid */
1641         *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1642 
1643         err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1644         if (err) {
1645                 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1646                         rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1647                 return err;
1648         }
1649         return 0;
1650 }
1651 
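/* MCE entries form a HW-walked chain: each entry replicates the pkt to
 * one PF/VF, 'next' points at the following entry and 'eol' terminates
 * the walk. E.g. a PF with two VFs ends up with three chained entries,
 * idx N (PF) -> N+1 (VF0) -> N+2 (VF1, eol set), as rebuilt by
 * nix_update_bcast_mce_list() below.
 */
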
1652 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1653                                u16 pcifunc, int idx, bool add)
1654 {
1655         struct mce *mce, *tail = NULL;
1656         bool delete = false;
1657 
1658         /* Scan through the current list */
1659         hlist_for_each_entry(mce, &mce_list->head, node) {
1660                 /* If already exists, then delete */
1661                 if (mce->pcifunc == pcifunc && !add) {
1662                         delete = true;
1663                         break;
1664                 }
1665                 tail = mce;
1666         }
1667 
1668         if (delete) {
1669                 hlist_del(&mce->node);
1670                 kfree(mce);
1671                 mce_list->count--;
1672                 return 0;
1673         }
1674 
1675         if (!add)
1676                 return 0;
1677 
1678         /* Add a new one to the list, at the tail */
1679         mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1680         if (!mce)
1681                 return -ENOMEM;
1682         mce->idx = idx;
1683         mce->pcifunc = pcifunc;
1684         if (!tail)
1685                 hlist_add_head(&mce->node, &mce_list->head);
1686         else
1687                 hlist_add_behind(&mce->node, &tail->node);
1688         mce_list->count++;
1689         return 0;
1690 }
1691 
1692 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1693 {
1694         int err = 0, idx, next_idx, count;
1695         struct nix_mce_list *mce_list;
1696         struct mce *mce, *next_mce;
1697         struct nix_mcast *mcast;
1698         struct nix_hw *nix_hw;
1699         struct rvu_pfvf *pfvf;
1700         int blkaddr;
1701 
1702         /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
1703         if (is_afvf(pcifunc))
1704                 return 0;
1705 
1706         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1707         if (blkaddr < 0)
1708                 return 0;
1709 
1710         nix_hw = get_nix_hw(rvu->hw, blkaddr);
1711         if (!nix_hw)
1712                 return 0;
1713 
1714         mcast = &nix_hw->mcast;
1715 
1716         /* Get this PF/VF func's MCE index */
1717         pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1718         idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1719 
1720         mce_list = &pfvf->bcast_mce_list;
1721         if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1722                 dev_err(rvu->dev,
1723                         "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1724                         __func__, idx, mce_list->max,
1725                         pcifunc >> RVU_PFVF_PF_SHIFT);
1726                 return -EINVAL;
1727         }
1728 
1729         mutex_lock(&mcast->mce_lock);
1730 
1731         err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1732         if (err)
1733                 goto end;
1734 
1735         /* Disable MCAM entry in NPC */
1736 
1737         if (!mce_list->count)
1738                 goto end;
1739         count = mce_list->count;
1740 
1741         /* Dump the updated list to HW */
1742         hlist_for_each_entry(mce, &mce_list->head, node) {
1743                 next_idx = 0;
1744                 count--;
1745                 if (count) {
1746                         next_mce = hlist_entry(mce->node.next,
1747                                                struct mce, node);
1748                         next_idx = next_mce->idx;
1749                 }
1750                 /* EOL should be set in last MCE */
1751                 err = nix_setup_mce(rvu, mce->idx,
1752                                     NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1753                                     next_idx, count ? false : true);
1754                 if (err)
1755                         goto end;
1756         }
1757 
1758 end:
1759         mutex_unlock(&mcast->mce_lock);
1760         return err;
1761 }
1762 
1763 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1764 {
1765         struct nix_mcast *mcast = &nix_hw->mcast;
1766         int err, pf, numvfs, idx;
1767         struct rvu_pfvf *pfvf;
1768         u16 pcifunc;
1769         u64 cfg;
1770 
1771         /* Skip PF0 (i.e. the AF) */
1772         for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1773                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1774                 /* If PF is not enabled, nothing to do */
1775                 if (!((cfg >> 20) & 0x01))
1776                         continue;
1777                 /* Get numVFs attached to this PF */
1778                 numvfs = (cfg >> 12) & 0xFF;
1779 
1780                 pfvf = &rvu->pf[pf];
1781                 /* Save the start MCE */
1782                 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1783 
1784                 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1785 
1786                 for (idx = 0; idx < (numvfs + 1); idx++) {
1787                         /* idx-0 is for PF, followed by VFs */
1788                         pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1789                         pcifunc |= idx;
1790                         /* Add dummy entries now, so that we don't have to check
1791                          * later whether AQ_OP should be INIT or WRITE.
1792                          * These will be updated when a NIXLF is attached to or
1793                          * detached from these PF/VFs.
1794                          */
1795                         err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1796                                             NIX_AQ_INSTOP_INIT,
1797                                             pcifunc, 0, true);
1798                         if (err)
1799                                 return err;
1800                 }
1801         }
1802         return 0;
1803 }
1804 
1805 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1806 {
1807         struct nix_mcast *mcast = &nix_hw->mcast;
1808         struct rvu_hwinfo *hw = rvu->hw;
1809         int err, size;
1810 
1811         size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1812         size = (1ULL << size);
1813 
1814         /* Alloc memory for multicast/mirror replication entries */
1815         err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1816                          (256UL << MC_TBL_SIZE), size);
1817         if (err)
1818                 return -ENOMEM;
1819 
1820         rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1821                     (u64)mcast->mce_ctx->iova);
1822 
1823         /* Set max list length to the max no. of VFs per PF + the PF itself */
1824         rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1825                     BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1826 
1827         /* Alloc memory for multicast replication buffers */
1828         size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1829         err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1830                          (8UL << MC_BUF_CNT), size);
1831         if (err)
1832                 return -ENOMEM;
1833 
1834         rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1835                     (u64)mcast->mcast_buf->iova);
1836 
1837         /* Alloc pkind for NIX internal RX multicast/mirror replay */
1838         mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1839 
1840         rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1841                     BIT_ULL(63) | (mcast->replay_pkind << 24) |
1842                     BIT_ULL(20) | MC_BUF_CNT);
1843 
1844         mutex_init(&mcast->mce_lock);
1845 
1846         return nix_setup_bcast_tables(rvu, nix_hw);
1847 }
1848 
1849 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1850 {
1851         struct nix_txsch *txsch;
1852         u64 cfg, reg;
1853         int err, lvl;
1854 
1855         /* Get the scheduler queue count of each type and alloc
1856          * a bitmap for each, for alloc/free/attach operations.
1857          */
1858         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1859                 txsch = &nix_hw->txsch[lvl];
1860                 txsch->lvl = lvl;
1861                 switch (lvl) {
1862                 case NIX_TXSCH_LVL_SMQ:
1863                         reg = NIX_AF_MDQ_CONST;
1864                         break;
1865                 case NIX_TXSCH_LVL_TL4:
1866                         reg = NIX_AF_TL4_CONST;
1867                         break;
1868                 case NIX_TXSCH_LVL_TL3:
1869                         reg = NIX_AF_TL3_CONST;
1870                         break;
1871                 case NIX_TXSCH_LVL_TL2:
1872                         reg = NIX_AF_TL2_CONST;
1873                         break;
1874                 case NIX_TXSCH_LVL_TL1:
1875                         reg = NIX_AF_TL1_CONST;
1876                         break;
1877                 }
1878                 cfg = rvu_read64(rvu, blkaddr, reg);
1879                 txsch->schq.max = cfg & 0xFFFF;
1880                 err = rvu_alloc_bitmap(&txsch->schq);
1881                 if (err)
1882                         return err;
1883 
1884                 /* Allocate memory for scheduler queues to
1885                  * PF/VF pcifunc mapping info.
1886                  */
1887                 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1888                                                sizeof(u32), GFP_KERNEL);
1889                 if (!txsch->pfvf_map)
1890                         return -ENOMEM;
1891                 memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
1892         }
1893         return 0;
1894 }
1895 
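/* A note on the pfvf_map encoding assumed throughout this file: the
 * TXSCH_MAP()/TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() helpers appear to pack
 * the owning pcifunc into the low 16 bits and flags such as
 * NIX_TXSCHQ_TL1_CFG_DONE above them, so the U8_MAX memset above marks
 * every queue as free of any real owner.
 */
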
1896 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
1897                                 int blkaddr, u32 cfg)
1898 {
1899         int fmt_idx;
1900 
1901         for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
1902                 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
1903                         return fmt_idx;
1904         }
1905         if (fmt_idx >= nix_hw->mark_format.total)
1906                 return -ERANGE;
1907 
1908         rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
1909         nix_hw->mark_format.cfg[fmt_idx] = cfg;
1910         nix_hw->mark_format.in_use++;
1911         return fmt_idx;
1912 }
1913 
1914 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
1915                                     int blkaddr)
1916 {
1917         u64 cfgs[] = {
1918                 [NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
1919                 [NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
1920                 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
1921                 [NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
1922                 [NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
1923                 [NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
1924                 [NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
1925                 [NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
1926                 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
1927         };
1928         int i, rc;
1929         u64 total;
1930 
1931         total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
1932         nix_hw->mark_format.total = (u8)total;
1933         nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
1934                                                GFP_KERNEL);
1935         if (!nix_hw->mark_format.cfg)
1936                 return -ENOMEM;
1937         for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
1938                 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
1939                 if (rc < 0)
1940                         dev_err(rvu->dev, "Err %d in setup mark format %d\n",
1941                                 rc, i);
1942         }
1943 
1944         return 0;
1945 }
1946 
1947 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
1948                                    struct msg_rsp *rsp)
1949 {
1950         struct rvu_hwinfo *hw = rvu->hw;
1951         u16 pcifunc = req->hdr.pcifunc;
1952         int i, nixlf, blkaddr;
1953         u64 stats;
1954 
1955         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1956         if (blkaddr < 0)
1957                 return NIX_AF_ERR_AF_LF_INVALID;
1958 
1959         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1960         if (nixlf < 0)
1961                 return NIX_AF_ERR_AF_LF_INVALID;
1962 
1963         /* Get stats count supported by HW */
1964         stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1965 
1966         /* Reset tx stats */
1967         for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1968                 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1969 
1970         /* Reset rx stats */
1971         for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1972                 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1973 
1974         return 0;
1975 }
1976 
1977 /* Returns the ALG index to be set into NPC_RX_ACTION */
1978 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
1979 {
1980         int i;
1981 
1982         /* Scan over existing algo entries to find a match */
1983         for (i = 0; i < nix_hw->flowkey.in_use; i++)
1984                 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
1985                         return i;
1986 
1987         return -ERANGE;
1988 }
1989 
1990 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
1991 {
1992         int idx, nr_field, key_off, field_marker, keyoff_marker;
1993         int max_key_off, max_bit_pos, group_member;
1994         struct nix_rx_flowkey_alg *field;
1995         struct nix_rx_flowkey_alg tmp;
1996         u32 key_type, valid_key;
1997 
1998         if (!alg)
1999                 return -EINVAL;
2000 
2001 #define FIELDS_PER_ALG  5
2002 #define MAX_KEY_OFF     40
2003         /* Clear all fields */
2004         memset(alg, 0, sizeof(u64) * FIELDS_PER_ALG);
2005 
2006         /* Each of the 32 possible flow key algorithm definitions should
2007          * fall into the above incremental config (except ALG0). Otherwise a
2008          * single NPC MCAM entry is not sufficient for supporting RSS.
2009          *
2010          * If a different definition or combination is needed, then the NPC
2011          * MCAM has to be programmed to filter such pkts and its action
2012          * should point to this definition to calculate the flowtag or hash.
2013          *
2014          * The for loop goes over _all_ protocol fields and the following
2015          * variables track the state machine's forward progress.
2016          *
2017          * keyoff_marker - Enabled when the hash byte length needs to be
2018          * accounted for in the field->key_offset update.
2019          * field_marker - Enabled when a new field needs to be selected.
2020          * group_member - Enabled when the protocol is part of a group.
2021          */
2022 
2023         keyoff_marker = 0; max_key_off = 0; group_member = 0;
2024         nr_field = 0; key_off = 0; field_marker = 1;
2025         field = &tmp; max_bit_pos = fls(flow_cfg);
2026         for (idx = 0;
2027              idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2028              key_off < MAX_KEY_OFF; idx++) {
2029                 key_type = BIT(idx);
2030                 valid_key = flow_cfg & key_type;
2031                 /* Found a field marker, reset the field values */
2032                 if (field_marker)
2033                         memset(&tmp, 0, sizeof(tmp));
2034 
2035                 switch (key_type) {
2036                 case NIX_FLOW_KEY_TYPE_PORT:
2037                         field->sel_chan = true;
2038                         /* This should be set to 1, when SEL_CHAN is set */
2039                         field->bytesm1 = 1;
2040                         field_marker = true;
2041                         keyoff_marker = true;
2042                         break;
2043                 case NIX_FLOW_KEY_TYPE_IPV4:
2044                         field->lid = NPC_LID_LC;
2045                         field->ltype_match = NPC_LT_LC_IP;
2046                         field->hdr_offset = 12; /* SIP offset */
2047                         field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2048                         field->ltype_mask = 0xF; /* Match only IPv4 */
2049                         field_marker = true;
2050                         keyoff_marker = false;
2051                         break;
2052                 case NIX_FLOW_KEY_TYPE_IPV6:
2053                         field->lid = NPC_LID_LC;
2054                         field->ltype_match = NPC_LT_LC_IP6;
2055                         field->hdr_offset = 8; /* SIP offset */
2056                         field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2057                         field->ltype_mask = 0xF; /* Match only IPv6 */
2058                         field_marker = true;
2059                         keyoff_marker = true;
2060                         break;
2061                 case NIX_FLOW_KEY_TYPE_TCP:
2062                 case NIX_FLOW_KEY_TYPE_UDP:
2063                 case NIX_FLOW_KEY_TYPE_SCTP:
2064                         field->lid = NPC_LID_LD;
2065                         field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2066                         if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
2067                                 field->ltype_match |= NPC_LT_LD_TCP;
2068                                 group_member = true;
2069                         } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
2070                                    valid_key) {
2071                                 field->ltype_match |= NPC_LT_LD_UDP;
2072                                 group_member = true;
2073                         } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
2074                                    valid_key) {
2075                                 field->ltype_match |= NPC_LT_LD_SCTP;
2076                                 group_member = true;
2077                         }
2078                         field->ltype_mask = ~field->ltype_match;
2079                         if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
2080                                 /* Handle the case where any of the group items
2081                                  * is enabled but not the final one
2082                                  */
2083                                 if (group_member) {
2084                                         valid_key = true;
2085                                         group_member = false;
2086                                 }
2087                                 field_marker = true;
2088                                 keyoff_marker = true;
2089                         } else {
2090                                 field_marker = false;
2091                                 keyoff_marker = false;
2092                         }
2093                         break;
2094                 }
2095                 field->ena = 1;
2096 
2097                 /* Found a valid flow key type */
2098                 if (valid_key) {
2099                         field->key_offset = key_off;
2100                         memcpy(&alg[nr_field], field, sizeof(*field));
2101                         max_key_off = max(max_key_off, field->bytesm1 + 1);
2102 
2103                         /* Found a field marker, get the next field */
2104                         if (field_marker)
2105                                 nr_field++;
2106                 }
2107 
2108                 /* Found a keyoff marker, update the new key_off */
2109                 if (keyoff_marker) {
2110                         key_off += max_key_off;
2111                         max_key_off = 0;
2112                 }
2113         }
2114         /* Processed all the flow key types */
2115         if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2116                 return 0;
2117         else
2118                 return NIX_AF_ERR_RSS_NOSPC_FIELD;
2119 }
2120 
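/* Worked example of the packing above: for flow_cfg = IPV4 | IPV6 |
 * TCP, alg[0] selects IPv4 SIP+DIP (8 bytes) at key offset 0, alg[1]
 * selects IPv6 SIP+DIP (32 bytes) also at offset 0 (the two ltypes are
 * mutually exclusive per pkt, so they can share key bytes), and alg[2]
 * selects the TCP ports at offset 32, for a 36-byte key within the
 * MAX_KEY_OFF budget of 40.
 */
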
2121 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2122 {
2123         u64 field[FIELDS_PER_ALG];
2124         struct nix_hw *hw;
2125         int fid, rc;
2126 
2127         hw = get_nix_hw(rvu->hw, blkaddr);
2128         if (!hw)
2129                 return -EINVAL;
2130 
2131         /* No room to add a new flow hash algorithm */
2132         if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2133                 return NIX_AF_ERR_RSS_NOSPC_ALGO;
2134 
2135         /* Generate algo fields for the given flow_cfg */
2136         rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2137         if (rc)
2138                 return rc;
2139 
2140         /* Update ALGX_FIELDX register with generated fields */
2141         for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2142                 rvu_write64(rvu, blkaddr,
2143                             NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2144                                                            fid), field[fid]);
2145 
2146         /* Store the flow_cfg for further lookup */
2147         rc = hw->flowkey.in_use;
2148         hw->flowkey.flowkey[rc] = flow_cfg;
2149         hw->flowkey.in_use++;
2150 
2151         return rc;
2152 }
2153 
2154 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2155                                          struct nix_rss_flowkey_cfg *req,
2156                                          struct nix_rss_flowkey_cfg_rsp *rsp)
2157 {
2158         struct rvu_hwinfo *hw = rvu->hw;
2159         u16 pcifunc = req->hdr.pcifunc;
2160         int alg_idx, nixlf, blkaddr;
2161         struct nix_hw *nix_hw;
2162 
2163         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2164         if (blkaddr < 0)
2165                 return NIX_AF_ERR_AF_LF_INVALID;
2166 
2167         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2168         if (nixlf < 0)
2169                 return NIX_AF_ERR_AF_LF_INVALID;
2170 
2171         nix_hw = get_nix_hw(rvu->hw, blkaddr);
2172         if (!nix_hw)
2173                 return -EINVAL;
2174 
2175         alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2176         /* Failed to get an algo index from the existing list, reserve a new one */
2177         if (alg_idx < 0) {
2178                 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2179                                                   req->flowkey_cfg);
2180                 if (alg_idx < 0)
2181                         return alg_idx;
2182         }
2183         rsp->alg_idx = alg_idx;
2184         rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2185                                        alg_idx, req->mcam_index);
2186         return 0;
2187 }
2188 
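/* Hypothetical caller sketch for the handler above: request 4-tuple
 * hashing for TCP and UDP over IPv4/IPv6 and read back the algo index
 * that gets programmed into NPC_RX_ACTION. Group and MCAM index values
 * are illustrative only.
 */
static void nix_rss_flowkey_example(struct rvu *rvu, u16 pcifunc)
{
        struct nix_rss_flowkey_cfg req = { };
        struct nix_rss_flowkey_cfg_rsp rsp = { };

        req.hdr.pcifunc = pcifunc;
        req.flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
                          NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP;
        req.group = 0;          /* default RSS group */
        req.mcam_index = 0;     /* illustrative MCAM entry */

        if (!rvu_mbox_handler_nix_rss_flowkey_cfg(rvu, &req, &rsp))
                dev_dbg(rvu->dev, "RSS alg_idx %d\n", rsp.alg_idx);
}
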
2189 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2190 {
2191         u32 flowkey_cfg, minkey_cfg;
2192         int alg, fid, rc;
2193 
2194         /* Disable all flow key algx fieldx */
2195         for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2196                 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2197                         rvu_write64(rvu, blkaddr,
2198                                     NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2199                                     0);
2200         }
2201 
2202         /* IPv4/IPv6 SIP/DIPs */
2203         flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2204         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2205         if (rc < 0)
2206                 return rc;
2207 
2208         /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2209         minkey_cfg = flowkey_cfg;
2210         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2211         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2212         if (rc < 0)
2213                 return rc;
2214 
2215         /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2216         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2217         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2218         if (rc < 0)
2219                 return rc;
2220 
2221         /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2222         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2223         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2224         if (rc < 0)
2225                 return rc;
2226 
2227         /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2228         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2229                         NIX_FLOW_KEY_TYPE_UDP;
2230         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2231         if (rc < 0)
2232                 return rc;
2233 
2234         /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2235         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2236                         NIX_FLOW_KEY_TYPE_SCTP;
2237         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2238         if (rc < 0)
2239                 return rc;
2240 
2241         /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2242         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2243                         NIX_FLOW_KEY_TYPE_SCTP;
2244         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2245         if (rc < 0)
2246                 return rc;
2247 
2248         /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2249         flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2250                       NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2251         rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2252         if (rc < 0)
2253                 return rc;
2254 
2255         return 0;
2256 }
2257 
2258 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2259                                       struct nix_set_mac_addr *req,
2260                                       struct msg_rsp *rsp)
2261 {
2262         struct rvu_hwinfo *hw = rvu->hw;
2263         u16 pcifunc = req->hdr.pcifunc;
2264         struct rvu_pfvf *pfvf;
2265         int blkaddr, nixlf;
2266 
2267         pfvf = rvu_get_pfvf(rvu, pcifunc);
2268         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2269         if (!pfvf->nixlf || blkaddr < 0)
2270                 return NIX_AF_ERR_AF_LF_INVALID;
2271 
2272         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2273         if (nixlf < 0)
2274                 return NIX_AF_ERR_AF_LF_INVALID;
2275 
2276         ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2277 
2278         rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2279                                     pfvf->rx_chan_base, req->mac_addr);
2280 
2281         rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2282 
2283         return 0;
2284 }
2285 
2286 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2287                                      struct msg_rsp *rsp)
2288 {
2289         bool allmulti = false, disable_promisc = false;
2290         struct rvu_hwinfo *hw = rvu->hw;
2291         u16 pcifunc = req->hdr.pcifunc;
2292         struct rvu_pfvf *pfvf;
2293         int blkaddr, nixlf;
2294 
2295         pfvf = rvu_get_pfvf(rvu, pcifunc);
2296         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2297         if (!pfvf->nixlf || blkaddr < 0)
2298                 return NIX_AF_ERR_AF_LF_INVALID;
2299 
2300         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2301         if (nixlf < 0)
2302                 return NIX_AF_ERR_AF_LF_INVALID;
2303 
2304         if (req->mode & NIX_RX_MODE_PROMISC)
2305                 allmulti = false;
2306         else if (req->mode & NIX_RX_MODE_ALLMULTI)
2307                 allmulti = true;
2308         else
2309                 disable_promisc = true;
2310 
2311         if (disable_promisc)
2312                 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2313         else
2314                 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2315                                               pfvf->rx_chan_base, allmulti);
2316 
2317         rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2318 
2319         return 0;
2320 }
2321 
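/* Mode precedence in the handler above: NIX_RX_MODE_PROMISC wins over
 * NIX_RX_MODE_ALLMULTI (both install the NPC promisc entry, the latter
 * presumably restricted to multicast DMACs by the 'allmulti' flag),
 * and a request with neither bit disables the promisc entry, falling
 * back to the installed ucast/bcast rules.
 */
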
2322 static void nix_find_link_frs(struct rvu *rvu,
2323                               struct nix_frs_cfg *req, u16 pcifunc)
2324 {
2325         int pf = rvu_get_pf(pcifunc);
2326         struct rvu_pfvf *pfvf;
2327         int maxlen, minlen;
2328         int numvfs, hwvf;
2329         int vf;
2330 
2331         /* Update with requester's min/max lengths */
2332         pfvf = rvu_get_pfvf(rvu, pcifunc);
2333         pfvf->maxlen = req->maxlen;
2334         if (req->update_minlen)
2335                 pfvf->minlen = req->minlen;
2336 
2337         maxlen = req->maxlen;
2338         minlen = req->update_minlen ? req->minlen : 0;
2339 
2340         /* Get this PF's numVFs and starting hwvf */
2341         rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2342 
2343         /* For each VF, compare requested max/minlen */
2344         for (vf = 0; vf < numvfs; vf++) {
2345                 pfvf =  &rvu->hwvf[hwvf + vf];
2346                 if (pfvf->maxlen > maxlen)
2347                         maxlen = pfvf->maxlen;
2348                 if (req->update_minlen &&
2349                     pfvf->minlen && pfvf->minlen < minlen)
2350                         minlen = pfvf->minlen;
2351         }
2352 
2353         /* Compare requested max/minlen with PF's max/minlen */
2354         pfvf = &rvu->pf[pf];
2355         if (pfvf->maxlen > maxlen)
2356                 maxlen = pfvf->maxlen;
2357         if (req->update_minlen &&
2358             pfvf->minlen && pfvf->minlen < minlen)
2359                 minlen = pfvf->minlen;
2360 
2361         /* Update the request with the max/min of the PF and its VFs */
2362         req->maxlen = maxlen;
2363         if (req->update_minlen)
2364                 req->minlen = minlen;
2365 }
2366 
2367 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2368                                     struct msg_rsp *rsp)
2369 {
2370         struct rvu_hwinfo *hw = rvu->hw;
2371         u16 pcifunc = req->hdr.pcifunc;
2372         int pf = rvu_get_pf(pcifunc);
2373         int blkaddr, schq, link = -1;
2374         struct nix_txsch *txsch;
2375         u64 cfg, lmac_fifo_len;
2376         struct nix_hw *nix_hw;
2377         u8 cgx = 0, lmac = 0;
2378 
2379         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2380         if (blkaddr < 0)
2381                 return NIX_AF_ERR_AF_LF_INVALID;
2382 
2383         nix_hw = get_nix_hw(rvu->hw, blkaddr);
2384         if (!nix_hw)
2385                 return -EINVAL;
2386 
2387         if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2388                 return NIX_AF_ERR_FRS_INVALID;
2389 
2390         if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2391                 return NIX_AF_ERR_FRS_INVALID;
2392 
2393         /* Check if requester wants to update SMQ's */
2394         if (!req->update_smq)
2395                 goto rx_frscfg;
2396 
2397         /* Update min/maxlen in each of the SMQ attached to this PF/VF */
2398         txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2399         mutex_lock(&rvu->rsrc_lock);
2400         for (schq = 0; schq < txsch->schq.max; schq++) {
2401                 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2402                         continue;
2403                 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2404                 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2405                 if (req->update_minlen)
2406                         cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2407                 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2408         }
2409         mutex_unlock(&rvu->rsrc_lock);
2410 
2411 rx_frscfg:
2412         /* Check if config is for SDP link */
2413         if (req->sdp_link) {
2414                 if (!hw->sdp_links)
2415                         return NIX_AF_ERR_RX_LINK_INVALID;
2416                 link = hw->cgx_links + hw->lbk_links;
2417                 goto linkcfg;
2418         }
2419 
2420         /* Check if the request is from CGX mapped RVU PF */
2421         if (is_pf_cgxmapped(rvu, pf)) {
2422                 /* Get CGX and LMAC to which this PF is mapped and find link */
2423                 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2424                 link = (cgx * hw->lmac_per_cgx) + lmac;
2425         } else if (pf == 0) {
2426                 /* For VFs of PF0, ingress is the LBK port, so config the LBK link */
2427                 link = hw->cgx_links;
2428         }
2429 
2430         if (link < 0)
2431                 return NIX_AF_ERR_RX_LINK_INVALID;
2432 
2433         nix_find_link_frs(rvu, req, pcifunc);
2434 
2435 linkcfg:
2436         cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2437         cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2438         if (req->update_minlen)
2439                 cfg = (cfg & ~0xFFFFULL) | req->minlen;
2440         rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2441 
2442         if (req->sdp_link || pf == 0)
2443                 return 0;
2444 
2445         /* Update transmit credits for CGX links */
2446         lmac_fifo_len =
2447                 CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2448         cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2449         cfg &= ~(0xFFFFFULL << 12);
2450         cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2451         rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2452         rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
2453 
2454         return 0;
2455 }
2456 
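/* Worked example of the credit update above, assuming a 64 KB CGX FIFO
 * (CGX_FIFO_LEN) shared by 4 LMACs: lmac_fifo_len = 16384 bytes, so a
 * new maxlen of 1514 yields (16384 - 1514) / 16 = 929 credit units of
 * 16 bytes each, written into bits <31:12> of the link's NORM and EXPR
 * credit registers.
 */
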
2457 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2458                                       struct msg_rsp *rsp)
2459 {
2460         struct npc_mcam_alloc_entry_req alloc_req = { };
2461         struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2462         struct npc_mcam_free_entry_req free_req = { };
2463         u16 pcifunc = req->hdr.pcifunc;
2464         int blkaddr, nixlf, err;
2465         struct rvu_pfvf *pfvf;
2466 
2467         /* LBK VFs do not have separate MCAM UCAST entry hence
2468          * skip allocating rxvlan for them
2469          */
2470         if (is_afvf(pcifunc))
2471                 return 0;
2472 
2473         pfvf = rvu_get_pfvf(rvu, pcifunc);
2474         if (pfvf->rxvlan)
2475                 return 0;
2476 
2477         /* alloc new mcam entry */
2478         alloc_req.hdr.pcifunc = pcifunc;
2479         alloc_req.count = 1;
2480 
2481         err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2482                                                     &alloc_rsp);
2483         if (err)
2484                 return err;
2485 
2486         /* update entry to enable rxvlan offload */
2487         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2488         if (blkaddr < 0) {
2489                 err = NIX_AF_ERR_AF_LF_INVALID;
2490                 goto free_entry;
2491         }
2492 
2493         nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2494         if (nixlf < 0) {
2495                 err = NIX_AF_ERR_AF_LF_INVALID;
2496                 goto free_entry;
2497         }
2498 
2499         pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2500         /* all it means is that rxvlan_index is valid */
2501         pfvf->rxvlan = true;
2502 
2503         err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2504         if (err)
2505                 goto free_entry;
2506 
2507         return 0;
2508 free_entry:
2509         free_req.hdr.pcifunc = pcifunc;
2510         free_req.entry = alloc_rsp.entry_list[0];
2511         rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2512         pfvf->rxvlan = false;
2513         return err;
2514 }
2515 
2516 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2517                                     struct msg_rsp *rsp)
2518 {
2519         struct rvu_hwinfo *hw = rvu->hw;
2520         u16 pcifunc = req->hdr.pcifunc;
2521         struct rvu_block *block;
2522         struct rvu_pfvf *pfvf;
2523         int nixlf, blkaddr;
2524         u64 cfg;
2525 
2526         pfvf = rvu_get_pfvf(rvu, pcifunc);
2527         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2528         if (!pfvf->nixlf || blkaddr < 0)
2529                 return NIX_AF_ERR_AF_LF_INVALID;
2530 
2531         block = &hw->block[blkaddr];
2532         nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
2533         if (nixlf < 0)
2534                 return NIX_AF_ERR_AF_LF_INVALID;
2535 
2536         cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2537         /* Set the interface configuration */
2538         if (req->len_verify & BIT(0))
2539                 cfg |= BIT_ULL(41);
2540         else
2541                 cfg &= ~BIT_ULL(41);
2542 
2543         if (req->len_verify & BIT(1))
2544                 cfg |= BIT_ULL(40);
2545         else
2546                 cfg &= ~BIT_ULL(40);
2547 
2548         if (req->csum_verify & BIT(0))
2549                 cfg |= BIT_ULL(37);
2550         else
2551                 cfg &= ~BIT_ULL(37);
2552 
2553         rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2554 
2555         return 0;
2556 }
2557 
2558 static void nix_link_config(struct rvu *rvu, int blkaddr)
2559 {
2560         struct rvu_hwinfo *hw = rvu->hw;
2561         int cgx, lmac_cnt, slink, link;
2562         u64 tx_credits;
2563 
2564         /* Set default min/max packet lengths allowed on NIX Rx links.
2565          *
2566          * With the HW reset minlen value of 60 bytes, HW would treat ARP
2567          * pkts as undersized and report them to SW as error pkts; hence
2568          * set it to 40 bytes.
2569          */
2570         for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2571                 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2572                             NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2573         }
2574 
2575         if (hw->sdp_links) {
2576                 link = hw->cgx_links + hw->lbk_links;
2577                 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2578                             SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2579         }
2580 
2581         /* Set credits for Tx links assuming max packet length allowed.
2582          * This will be reconfigured based on MTU set for PF/VF.
2583          */
2584         for (cgx = 0; cgx < hw->cgx; cgx++) {
2585                 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2586                 tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2587                 /* Enable credits and set credit pkt count to max allowed */
2588                 tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2589                 slink = cgx * hw->lmac_per_cgx;
2590                 for (link = slink; link < (slink + lmac_cnt); link++) {
2591                         rvu_write64(rvu, blkaddr,
2592                                     NIX_AF_TX_LINKX_NORM_CREDIT(link),
2593                                     tx_credits);
2594                         rvu_write64(rvu, blkaddr,
2595                                     NIX_AF_TX_LINKX_EXPR_CREDIT(link),
2596                                     tx_credits);
2597                 }
2598         }
2599 
2600         /* Set Tx credits for LBK link */
2601         slink = hw->cgx_links;
2602         for (link = slink; link < (slink + hw->lbk_links); link++) {
2603                 tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
2604                 /* Enable credits and set credit pkt count to max allowed */
2605                 tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2606                 rvu_write64(rvu, blkaddr,
2607                             NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
2608                 rvu_write64(rvu, blkaddr,
2609                             NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
2610         }
2611 }
2612 
2613 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2614 {
2615         int idx, err;
2616         u64 status;
2617 
2618         /* Start X2P bus calibration */
2619         rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2620                     rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2621         /* Wait for calibration to complete */
2622         err = rvu_poll_reg(rvu, blkaddr,
2623                            NIX_AF_STATUS, BIT_ULL(10), false);
2624         if (err) {
2625                 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2626                 return err;
2627         }
2628 
2629         status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2630         /* Check if CGX devices are ready */
2631         for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2632                 /* Skip when cgx port is not available */
2633                 if (!rvu_cgx_pdata(idx, rvu) ||
2634                     (status & (BIT_ULL(16 + idx))))
2635                         continue;
2636                 dev_err(rvu->dev,
2637                         "CGX%d didn't respond to NIX X2P calibration\n", idx);
2638                 err = -EBUSY;
2639         }
2640 
2641         /* Check if LBK is ready */
2642         if (!(status & BIT_ULL(19))) {
2643                 dev_err(rvu->dev,
2644                         "LBK didn't respond to NIX X2P calibration\n");
2645                 err = -EBUSY;
2646         }
2647 
2648         /* Clear 'calibrate_x2p' bit */
2649         rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2650                     rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2651         if (err || (status & 0x3FFULL))
2652                 dev_err(rvu->dev,
2653                         "NIX X2P calibration failed, status 0x%llx\n", status);
2654         if (err)
2655                 return err;
2656         return 0;
2657 }
2658 
2659 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2660 {
2661         u64 cfg;
2662         int err;
2663 
2664         /* Set admin queue endianness */
2665         cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2666 #ifdef __BIG_ENDIAN
2667         cfg |= BIT_ULL(8);
2668         rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2669 #else
2670         cfg &= ~BIT_ULL(8);
2671         rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2672 #endif
2673 
2674         /* Do not bypass NDC cache */
2675         cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2676         cfg &= ~0x3FFEULL;
2677         rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2678 
2679         /* The result structure can be followed by an RQ/SQ/CQ context at
2680          * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
2681          * the operation type. Alloc sufficient result memory for all ops.
2682          */
2683         err = rvu_aq_alloc(rvu, &block->aq,
2684                            Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2685                            ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2686         if (err)
2687                 return err;
2688 
2689         rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2690         rvu_write64(rvu, block->addr,
2691                     NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2692         return 0;
2693 }
2694 
2695 int rvu_nix_init(struct rvu *rvu)
2696 {
2697         struct rvu_hwinfo *hw = rvu->hw;
2698         struct rvu_block *block;
2699         int blkaddr, err;
2700         u64 cfg;
2701 
2702         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2703         if (blkaddr < 0)
2704                 return 0;
2705         block = &hw->block[blkaddr];
2706 
2707         /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
2708          * internal state when conditional clocks are turned off.
2709          * Hence enable them.
2710          */
2711         if (is_rvu_9xxx_A0(rvu))
2712                 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2713                             rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
2714 
2715         /* Calibrate X2P bus to check if CGX/LBK links are fine */
2716         err = nix_calibrate_x2p(rvu, blkaddr);
2717         if (err)
2718                 return err;
2719 
2720         /* Set num of links of each type */
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
        hw->cgx = (cfg >> 12) & 0xF;
        hw->lmac_per_cgx = (cfg >> 8) & 0xF;
        hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
        hw->lbk_links = 1;
        hw->sdp_links = 1;

        /* Initialize admin queue */
        err = nix_aq_init(rvu, block);
        if (err)
                return err;

        /* Restore CINT timer delay to HW reset value */
        rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

        if (blkaddr == BLKADDR_NIX0) {
                hw->nix0 = devm_kzalloc(rvu->dev,
                                        sizeof(struct nix_hw), GFP_KERNEL);
                if (!hw->nix0)
                        return -ENOMEM;

                err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
                if (err)
                        return err;

                err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
                if (err)
                        return err;

                err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
                if (err)
                        return err;

                /* Configure segmentation offload formats */
                nix_setup_lso(rvu, hw->nix0, blkaddr);

                /* Config outer/inner L2, IP, TCP, UDP and SCTP NPC layer
                 * info. This helps the HW protocol checker identify headers
                 * and validate lengths and checksums.
                 */
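                /* Each NIX_AF_RX_DEF_* value encodes
                 * (layer ID << 8) | (layer type << 4) | mask; the 0x0F low
                 * nibble is assumed to match the full layer-type field.
                 */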
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
                            (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
                            (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
                            (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
                            (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
                            0x0F);

                err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
                if (err)
                        return err;

                /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
                nix_link_config(rvu, blkaddr);
        }
        return 0;
}

void rvu_nix_freemem(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct nix_txsch *txsch;
        struct nix_mcast *mcast;
        struct nix_hw *nix_hw;
        int blkaddr, lvl;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        rvu_aq_free(rvu, block->aq);

        if (blkaddr == BLKADDR_NIX0) {
                nix_hw = get_nix_hw(rvu->hw, blkaddr);
                if (!nix_hw)
                        return;

                for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                        txsch = &nix_hw->txsch[lvl];
                        kfree(txsch->schq.bmap);
                }

                mcast = &nix_hw->mcast;
                qmem_free(rvu->dev, mcast->mce_ctx);
                qmem_free(rvu->dev, mcast->mcast_buf);
                mutex_destroy(&mcast->mce_lock);
        }
}

static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
        if (*nixlf < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        return 0;
}

int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
                                     struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        int nixlf, err;

        err = nix_get_nixlf(rvu, pcifunc, &nixlf);
        if (err)
                return err;

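        /* Re-enable this LF's default NPC MCAM entries (e.g. unicast match),
         * assumed to let RX traffic flow to the LF again.
         */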
        rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
        return 0;
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        int nixlf, err;

        err = nix_get_nixlf(rvu, pcifunc, &nixlf);
        if (err)
                return err;

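        /* Counterpart of nix_lf_start_rx: disable the default MCAM entries
         * so RX traffic to this LF stops.
         */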
        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
        return 0;
}

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct hwctx_disable_req ctx_req;
        int err;

        ctx_req.hdr.pcifunc = pcifunc;
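        /* NOTE: only hdr.pcifunc and ctype are filled in before each
         * nix_lf_hwctx_disable() call below; the other fields are assumed
         * to be ignored for a disable request.
         */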

        /* Cleanup NPC MCAM entries and free the Tx scheduler queues in use */
        nix_interface_deinit(rvu, pcifunc, nixlf);
        nix_rx_sync(rvu, blkaddr);
        nix_txschq_free(rvu, pcifunc);

        if (pfvf->sq_ctx) {
                ctx_req.ctype = NIX_AQ_CTYPE_SQ;
                err = nix_lf_hwctx_disable(rvu, &ctx_req);
                if (err)
                        dev_err(rvu->dev, "SQ ctx disable failed\n");
        }

        if (pfvf->rq_ctx) {
                ctx_req.ctype = NIX_AQ_CTYPE_RQ;
                err = nix_lf_hwctx_disable(rvu, &ctx_req);
                if (err)
                        dev_err(rvu->dev, "RQ ctx disable failed\n");
        }

        if (pfvf->cq_ctx) {
                ctx_req.ctype = NIX_AQ_CTYPE_CQ;
                err = nix_lf_hwctx_disable(rvu, &ctx_req);
                if (err)
                        dev_err(rvu->dev, "CQ ctx disable failed\n");
        }

        nix_ctx_free(rvu, pfvf);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
                                        struct nix_lso_format_cfg *req,
                                        struct nix_lso_format_cfg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        struct nix_hw *nix_hw;
        struct rvu_pfvf *pfvf;
        int blkaddr, idx, f;
        u64 reg;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
                return NIX_AF_ERR_AF_LF_INVALID;

        nix_hw = get_nix_hw(rvu->hw, blkaddr);
        if (!nix_hw)
                return -EINVAL;

        /* Find an existing matching LSO format, if any */
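        /* A format matches only if all NIX_LSO_FIELD_MAX fields match the
         * request under req->field_mask.
         */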
        for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
                for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
                        reg = rvu_read64(rvu, blkaddr,
                                         NIX_AF_LSO_FORMATX_FIELDX(idx, f));
                        if (req->fields[f] != (reg & req->field_mask))
                                break;
                }

                if (f == NIX_LSO_FIELD_MAX)
                        break;
        }

        if (idx < nix_hw->lso.in_use) {
                /* Match found */
                rsp->lso_format_idx = idx;
                return 0;
        }

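        /* No match and no free format entries left */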
        if (nix_hw->lso.in_use == nix_hw->lso.total)
                return NIX_AF_ERR_LSO_CFG_FAIL;

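        /* Reserve the next free entry and program the requested fields */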
        rsp->lso_format_idx = nix_hw->lso.in_use++;

        for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
                rvu_write64(rvu, blkaddr,
                            NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
                            req->fields[f]);

        return 0;
}
