This source file includes the following definitions:
- ice_pkg_val_buf
- ice_find_buf_table
- ice_pkg_enum_buf
- ice_pkg_advance_sect
- ice_pkg_enum_section
- ice_acquire_global_cfg_lock
- ice_release_global_cfg_lock
- ice_aq_download_pkg
- ice_find_seg_in_pkg
- ice_dwnld_cfg_bufs
- ice_aq_get_pkg_info_list
- ice_download_pkg
- ice_init_pkg_info
- ice_get_pkg_info
- ice_verify_pkg
- ice_free_seg
- ice_init_pkg_regs
- ice_chk_pkg_version
- ice_init_pkg
- ice_copy_and_init_pkg
- ice_ptg_find_ptype
- ice_ptg_alloc_val
- ice_ptg_remove_ptype
- ice_ptg_add_mv_ptype
- ice_vsig_find_vsi
- ice_vsig_alloc_val
- ice_vsig_remove_vsi
- ice_vsig_add_mv_vsi
- ice_init_sw_xlt1_db
- ice_init_sw_xlt2_db
- ice_init_sw_db
- ice_fill_tbl
- ice_fill_blk_tbls
- ice_free_hw_tbls
- ice_clear_hw_tbls
- ice_init_hw_tbls
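
For orientation: the usual consumer of the functions listed above is the driver's package-load path, where the DDP package file is read with request_firmware() and handed to ice_copy_and_init_pkg(), with ice_init_hw_tbls() having allocated the driver's HW table shadow memory earlier during HW init. A minimal sketch of such a caller follows (illustrative only; the caller name and the package path are assumptions, not part of this file):

/* Illustrative only: a hypothetical caller that feeds a DDP package file to
 * ice_copy_and_init_pkg(). The function name and package path below are
 * assumptions for this sketch; request_firmware()/release_firmware() come
 * from linux/firmware.h. ice_init_hw_tbls() is assumed to have been called
 * earlier (from the HW init path) so the table shadow memory exists.
 */
static int example_load_pkg(struct ice_hw *hw, struct device *dev)
{
        const struct firmware *fw;
        enum ice_status status;
        int err;

        /* package path is an assumption for this sketch */
        err = request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev);
        if (err)
                return err;

        /* the firmware buffer is const, so use the copying variant; it
         * duplicates the buffer and runs ice_init_pkg() on the copy
         */
        status = ice_copy_and_init_pkg(hw, fw->data, fw->size);
        release_firmware(fw);

        return status ? -EIO : 0;
}
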
   1 
   2 
   3 
   4 #include "ice_common.h"
   5 #include "ice_flex_pipe.h"
   6 
   7 /**
   8  * ice_pkg_val_buf
   9  * @buf: pointer to the ice buffer
  10  *
  11  * This helper function validates a buffer's header.
  12  */
  13 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
  14 {
  15         struct ice_buf_hdr *hdr;
  16         u16 section_count;
  17         u16 data_end;
  18 
  19         hdr = (struct ice_buf_hdr *)buf->buf;
  20         
  21         section_count = le16_to_cpu(hdr->section_count);
  22         if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
  23                 return NULL;
  24 
  25         data_end = le16_to_cpu(hdr->data_end);
  26         if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
  27                 return NULL;
  28 
  29         return hdr;
  30 }
  31 
  32 /**
  33  * ice_find_buf_table
  34  * @ice_seg: pointer to the ice segment
  35  *
  36  * Returns the address of the buffer table within the ice segment.
  37  */
  38 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
  39 {
  40         struct ice_nvm_table *nvms;
  41 
  42         nvms = (struct ice_nvm_table *)
  43                 (ice_seg->device_table +
  44                  le32_to_cpu(ice_seg->device_table_count));
  45 
  46         return (__force struct ice_buf_table *)
  47                 (nvms->vers + le32_to_cpu(nvms->table_count));
  48 }
  49 
  50 
  51 
  52 /**
  53  * ice_pkg_enum_buf - enumerate buffers in the ice segment
  54  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
  55  * @state: pointer to the enum state
  56  *
  57  * This function will enumerate all the buffers in the ice segment. The first
  58  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
  59  * ice_seg is set to NULL, which continues the enumeration. A NULL return
  60  * indicates that the end of the buffers has been reached or an error occurred.
  61  */
  62 static struct ice_buf_hdr *
  63 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
  64 {
  65         if (ice_seg) {
  66                 state->buf_table = ice_find_buf_table(ice_seg);
  67                 if (!state->buf_table)
  68                         return NULL;
  69 
  70                 state->buf_idx = 0;
  71                 return ice_pkg_val_buf(state->buf_table->buf_array);
  72         }
  73 
  74         if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
  75                 return ice_pkg_val_buf(state->buf_table->buf_array +
  76                                        state->buf_idx);
  77         else
  78                 return NULL;
  79 }
  80 
  81 /**
  82  * ice_pkg_advance_sect
  83  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
  84  * @state: pointer to the enum state
  85  *
  86  * This helper function will advance the section within the ice segment,
  87  * also advancing the buffer if needed.
  88  */
  89 static bool
  90 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
  91 {
  92         if (!ice_seg && !state->buf)
  93                 return false;
  94 
  95         if (!ice_seg && state->buf)
  96                 if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
  97                         return true;
  98 
  99         state->buf = ice_pkg_enum_buf(ice_seg, state);
 100         if (!state->buf)
 101                 return false;
 102 
 103         /* start of new buffer, reset section index */
 104         state->sect_idx = 0;
 105         return true;
 106 }
 107 
 108 /**
 109  * ice_pkg_enum_section - enumerate sections of a given type
 110  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 111  * @state: pointer to the enum state
 112  * @sect_type: section type to enumerate
 113  *
 114  * This function will enumerate all the sections of a particular type in the
 115  * ice segment. The first call is made with the ice_seg parameter non-NULL;
 116  * on subsequent calls, ice_seg is set to NULL, which continues the
 117  * enumeration. When the function returns a NULL pointer, the end of the
 118  * matching sections has been reached.
 119  */
 120 static void *
 121 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 122                      u32 sect_type)
 123 {
 124         u16 offset, size;
 125 
 126         if (ice_seg)
 127                 state->type = sect_type;
 128 
 129         if (!ice_pkg_advance_sect(ice_seg, state))
 130                 return NULL;
 131 
 132         /* scan for next matching section */
 133         while (state->buf->section_entry[state->sect_idx].type !=
 134                cpu_to_le32(state->type))
 135                 if (!ice_pkg_advance_sect(NULL, state))
 136                         return NULL;
 137 
 138         
 139         offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
 140         if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
 141                 return NULL;
 142 
 143         size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
 144         if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
 145                 return NULL;
 146 
 147         /* make sure the section fits within the buffer */
 148         if (offset + size > ICE_PKG_BUF_SIZE)
 149                 return NULL;
 150 
 151         state->sect_type =
 152                 le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
 153 
 154         
 155         state->sect = ((u8 *)state->buf) +
 156                 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
 157 
 158         return state->sect;
 159 }
 160 
 161 
 162 
 163 
 164 
 165 
 166 
 167 
 168 /**
 169  * ice_acquire_global_cfg_lock
 170  * @hw: pointer to the HW structure
 171  * @access: access type (read or write)
 172  *
 173  * This function will request ownership of the global config lock for reading
 174  * or writing of the package. ICE_ERR_AQ_NO_WORK means another driver has
 175  * already written the package, so there is no work left for the caller.
 176  */
 177 static enum ice_status
 178 ice_acquire_global_cfg_lock(struct ice_hw *hw,
 179                             enum ice_aq_res_access_type access)
 180 {
 181         enum ice_status status;
 182 
 183         status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
 184                                  ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 185 
 186         if (!status)
 187                 mutex_lock(&ice_global_cfg_lock_sw);
 188         else if (status == ICE_ERR_AQ_NO_WORK)
 189                 ice_debug(hw, ICE_DBG_PKG,
 190                           "Global config lock: No work to do\n");
 191 
 192         return status;
 193 }
 194 
 195 /**
 196  * ice_release_global_cfg_lock
 197  * @hw: pointer to the HW structure
 198  *
 199  * This function will release the global config lock.
 200  */
 201 static void ice_release_global_cfg_lock(struct ice_hw *hw)
 202 {
 203         mutex_unlock(&ice_global_cfg_lock_sw);
 204         ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 205 }
 206 
 207 /**
 208  * ice_aq_download_pkg
 209  * @hw: pointer to the hardware structure
 210  * @pkg_buf: the package buffer to transfer
 211  * @buf_size: the size of the package buffer
 212  * @last_buf: last buffer indicator
 213  * @error_offset: returns error offset
 214  * @error_info: returns error information
 215  * @cd: pointer to command details structure or NULL
 216  *
 217  * Download Package (0x0C40)
 218  */
 219 static enum ice_status
 220 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
 221                     u16 buf_size, bool last_buf, u32 *error_offset,
 222                     u32 *error_info, struct ice_sq_cd *cd)
 223 {
 224         struct ice_aqc_download_pkg *cmd;
 225         struct ice_aq_desc desc;
 226         enum ice_status status;
 227 
 228         if (error_offset)
 229                 *error_offset = 0;
 230         if (error_info)
 231                 *error_info = 0;
 232 
 233         cmd = &desc.params.download_pkg;
 234         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
 235         desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 236 
 237         if (last_buf)
 238                 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
 239 
 240         status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
 241         if (status == ICE_ERR_AQ_ERROR) {
 242                 /* Read error from buffer only when the FW returned an error */
 243                 struct ice_aqc_download_pkg_resp *resp;
 244 
 245                 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
 246                 if (error_offset)
 247                         *error_offset = le32_to_cpu(resp->error_offset);
 248                 if (error_info)
 249                         *error_info = le32_to_cpu(resp->error_info);
 250         }
 251 
 252         return status;
 253 }
 254 
 255 /**
 256  * ice_find_seg_in_pkg
 257  * @hw: pointer to the hardware structure
 258  * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
 259  * @pkg_hdr: pointer to the package header to be searched
 260  *
 261  * This function searches a package file for a particular segment type. On
 262  * success it returns a pointer to the segment header, otherwise it will
 263  * return NULL.
 264  */
 265 static struct ice_generic_seg_hdr *
 266 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
 267                     struct ice_pkg_hdr *pkg_hdr)
 268 {
 269         u32 i;
 270 
 271         ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
 272                   pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
 273                   pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
 274 
 275         /* Search all package segments for the requested segment type */
 276         for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
 277                 struct ice_generic_seg_hdr *seg;
 278 
 279                 seg = (struct ice_generic_seg_hdr *)
 280                         ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
 281 
 282                 if (le32_to_cpu(seg->seg_type) == seg_type)
 283                         return seg;
 284         }
 285 
 286         return NULL;
 287 }
 288 
 289 /**
 290  * ice_dwnld_cfg_bufs
 291  * @hw: pointer to the hardware structure
 292  * @bufs: pointer to an array of buffers
 293  * @count: the number of buffers in the array
 294  *
 295  * Obtains the global config lock and downloads the package configuration
 296  * buffers to the firmware. Metadata buffers are skipped; the first metadata
 297  * buffer found marks the current buffer as the last one to be downloaded.
 298  */
 299 static enum ice_status
 300 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 301 {
 302         enum ice_status status;
 303         struct ice_buf_hdr *bh;
 304         u32 offset, info, i;
 305 
 306         if (!bufs || !count)
 307                 return ICE_ERR_PARAM;
 308 
 309         /* If the first buffer's first section has its metadata bit set
 310          * then there are no buffers to be downloaded, and the operation is
 311          * considered a success.
 312          */
 313         bh = (struct ice_buf_hdr *)bufs;
 314         if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
 315                 return 0;
 316 
 317         /* reset pkg_dwnld_status in case this function is called in the
 318          * reset/rebuild flow
 319          */
 320         hw->pkg_dwnld_status = ICE_AQ_RC_OK;
 321 
 322         status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 323         if (status) {
 324                 if (status == ICE_ERR_AQ_NO_WORK)
 325                         hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
 326                 else
 327                         hw->pkg_dwnld_status = hw->adminq.sq_last_status;
 328                 return status;
 329         }
 330 
 331         for (i = 0; i < count; i++) {
 332                 bool last = ((i + 1) == count);
 333 
 334                 if (!last) {
 335                         /* check next buffer for metadata flag */
 336                         bh = (struct ice_buf_hdr *)(bufs + i + 1);
 337 
 338                         /* A set metadata flag in the next buffer will signal
 339                          * that the current buffer will be the last buffer
 340                          * downloaded
 341                          */
 342                         if (le16_to_cpu(bh->section_count))
 343                                 if (le32_to_cpu(bh->section_entry[0].type) &
 344                                     ICE_METADATA_BUF)
 345                                         last = true;
 346                 }
 347 
 348                 bh = (struct ice_buf_hdr *)(bufs + i);
 349 
 350                 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
 351                                              &offset, &info, NULL);
 352 
 353                 /* Save AQ status from download package */
 354                 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
 355                 if (status) {
 356                         ice_debug(hw, ICE_DBG_PKG,
 357                                   "Pkg download failed: err %d off %d inf %d\n",
 358                                   status, offset, info);
 359 
 360                         break;
 361                 }
 362 
 363                 if (last)
 364                         break;
 365         }
 366 
 367         ice_release_global_cfg_lock(hw);
 368 
 369         return status;
 370 }
 371 
 372 /**
 373  * ice_aq_get_pkg_info_list
 374  * @hw: pointer to the hardware structure
 375  * @pkg_info: the buffer which will receive the information list
 376  * @buf_size: the size of the pkg_info information buffer
 377  * @cd: pointer to command details structure or NULL
 378  *
 379  * Get Package Info List (0x0C43)
 380  */
 381 static enum ice_status
 382 ice_aq_get_pkg_info_list(struct ice_hw *hw,
 383                          struct ice_aqc_get_pkg_info_resp *pkg_info,
 384                          u16 buf_size, struct ice_sq_cd *cd)
 385 {
 386         struct ice_aq_desc desc;
 387 
 388         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
 389 
 390         return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
 391 }
 392 
 393 /**
 394  * ice_download_pkg
 395  * @hw: pointer to the hardware structure
 396  * @ice_seg: pointer to the segment of the package to be downloaded
 397  *
 398  * Handles the download of a complete package.
 399  */
 400 static enum ice_status
 401 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
 402 {
 403         struct ice_buf_table *ice_buf_tbl;
 404 
 405         ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
 406                   ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
 407                   ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
 408 
 409         ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
 410                   le32_to_cpu(ice_seg->hdr.seg_type),
 411                   le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
 412 
 413         ice_buf_tbl = ice_find_buf_table(ice_seg);
 414 
 415         ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
 416                   le32_to_cpu(ice_buf_tbl->buf_count));
 417 
 418         return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
 419                                   le32_to_cpu(ice_buf_tbl->buf_count));
 420 }
 421 
 422 /**
 423  * ice_init_pkg_info
 424  * @hw: pointer to the hardware structure
 425  * @pkg_hdr: pointer to the driver's package hdr
 426  *
 427  * Saves off the package details into the HW structure.
 428  */
 429 static enum ice_status
 430 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 431 {
 432         struct ice_global_metadata_seg *meta_seg;
 433         struct ice_generic_seg_hdr *seg_hdr;
 434 
 435         if (!pkg_hdr)
 436                 return ICE_ERR_PARAM;
 437 
 438         meta_seg = (struct ice_global_metadata_seg *)
 439                    ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
 440         if (meta_seg) {
 441                 hw->pkg_ver = meta_seg->pkg_ver;
 442                 memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
 443 
 444                 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
 445                           meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
 446                           meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
 447                           meta_seg->pkg_name);
 448         } else {
 449                 ice_debug(hw, ICE_DBG_INIT,
 450                           "Did not find metadata segment in driver package\n");
 451                 return ICE_ERR_CFG;
 452         }
 453 
 454         seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
 455         if (seg_hdr) {
 456                 hw->ice_pkg_ver = seg_hdr->seg_ver;
 457                 memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
 458                        sizeof(hw->ice_pkg_name));
 459 
 460                 ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
 461                           seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
 462                           seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
 463                           seg_hdr->seg_name);
 464         } else {
 465                 ice_debug(hw, ICE_DBG_INIT,
 466                           "Did not find ice segment in driver package\n");
 467                 return ICE_ERR_CFG;
 468         }
 469 
 470         return 0;
 471 }
 472 
 473 /**
 474  * ice_get_pkg_info
 475  * @hw: pointer to the hardware structure
 476  *
 477  * Store details of the package currently loaded in HW into the HW structure.
 478  */
 479 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
 480 {
 481         struct ice_aqc_get_pkg_info_resp *pkg_info;
 482         enum ice_status status;
 483         u16 size;
 484         u32 i;
 485 
 486         size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
 487                                     (ICE_PKG_CNT - 1));
 488         pkg_info = kzalloc(size, GFP_KERNEL);
 489         if (!pkg_info)
 490                 return ICE_ERR_NO_MEMORY;
 491 
 492         status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
 493         if (status)
 494                 goto init_pkg_free_alloc;
 495 
 496         for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
 497 #define ICE_PKG_FLAG_COUNT      4
 498                 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
 499                 u8 place = 0;
 500 
 501                 if (pkg_info->pkg_info[i].is_active) {
 502                         flags[place++] = 'A';
 503                         hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
 504                         memcpy(hw->active_pkg_name,
 505                                pkg_info->pkg_info[i].name,
 506                                sizeof(hw->active_pkg_name));
 507                         hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
 508                 }
 509                 if (pkg_info->pkg_info[i].is_active_at_boot)
 510                         flags[place++] = 'B';
 511                 if (pkg_info->pkg_info[i].is_modified)
 512                         flags[place++] = 'M';
 513                 if (pkg_info->pkg_info[i].is_in_nvm)
 514                         flags[place++] = 'N';
 515 
 516                 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
 517                           i, pkg_info->pkg_info[i].ver.major,
 518                           pkg_info->pkg_info[i].ver.minor,
 519                           pkg_info->pkg_info[i].ver.update,
 520                           pkg_info->pkg_info[i].ver.draft,
 521                           pkg_info->pkg_info[i].name, flags);
 522         }
 523 
 524 init_pkg_free_alloc:
 525         kfree(pkg_info);
 526 
 527         return status;
 528 }
 529 
 530 /**
 531  * ice_verify_pkg - verify package
 532  * @pkg: pointer to the package buffer
 533  * @len: size of the package buffer
 534  *
 535  * Verifies various attributes of the package file, including length, format
 536  * version, and the requirement of at least one segment.
 537  */
 538 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
 539 {
 540         u32 seg_count;
 541         u32 i;
 542 
 543         if (len < sizeof(*pkg))
 544                 return ICE_ERR_BUF_TOO_SHORT;
 545 
 546         if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
 547             pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
 548             pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
 549             pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
 550                 return ICE_ERR_CFG;
 551 
 552         /* pkg must have at least one segment */
 553         seg_count = le32_to_cpu(pkg->seg_count);
 554         if (seg_count < 1)
 555                 return ICE_ERR_CFG;
 556 
 557         /* make sure segment array fits in package length */
 558         if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
 559                 return ICE_ERR_BUF_TOO_SHORT;
 560 
 561         /* all segments must fit within length */
 562         for (i = 0; i < seg_count; i++) {
 563                 u32 off = le32_to_cpu(pkg->seg_offset[i]);
 564                 struct ice_generic_seg_hdr *seg;
 565 
 566                 /* segment header must fit */
 567                 if (len < off + sizeof(*seg))
 568                         return ICE_ERR_BUF_TOO_SHORT;
 569 
 570                 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
 571 
 572                 /* segment body must fit */
 573                 if (len < off + le32_to_cpu(seg->seg_size))
 574                         return ICE_ERR_BUF_TOO_SHORT;
 575         }
 576 
 577         return 0;
 578 }
 579 
 580 /**
 581  * ice_free_seg - free package segment pointer
 582  * @hw: pointer to the hardware structure
 583  *
 584  * Frees the package copy and clears the segment pointer saved off in the HW
 585  * structure.
 586  */
 587 void ice_free_seg(struct ice_hw *hw)
 588 {
 589         if (hw->pkg_copy) {
 590                 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
 591                 hw->pkg_copy = NULL;
 592                 hw->pkg_size = 0;
 593         }
 594         hw->seg = NULL;
 595 }
 596 
 597 /**
 598  * ice_init_pkg_regs - initialize additional package registers
 599  * @hw: pointer to the hardware structure
 600  */
 601 static void ice_init_pkg_regs(struct ice_hw *hw)
 602 {
 603 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
 604 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
 605 #define ICE_SW_BLK_IDX  0
 606 
 607         /* setup Switch block input mask, which is 48-bits in two parts */
 608         wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
 609         wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
 610 }
 611 
 612 /**
 613  * ice_chk_pkg_version - check package version for compatibility with driver
 614  * @pkg_ver: pointer to a version structure to check
 615  *
 616  * Check to make sure that the package about to be downloaded is compatible
 617  * with the driver. To be compatible, the major and minor components of the
 618  * package version must match the driver's ICE_PKG_SUPP_VER_MAJ and
 619  * ICE_PKG_SUPP_VER_MNR definitions.
 620  */
 621 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
 622 {
 623         if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
 624             pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
 625                 return ICE_ERR_NOT_SUPPORTED;
 626 
 627         return 0;
 628 }
 629 
 630 
 631 
 632 
 633 
 634 
 635 
 636 
 637 
 638 
 639 
 640 /**
 641  * ice_init_pkg - initialize/download package
 642  * @hw: pointer to the hardware structure
 643  * @buf: pointer to the package buffer
 644  * @len: size of the package buffer
 645  *
 646  * This function initializes a package. The package contains HW tables
 647  * required to configure the device for advanced classification and flow
 648  * steering. If the package was already downloaded (for example by another
 649  * PF or before a reset), the download step returns ICE_ERR_AQ_NO_WORK and
 650  * initialization continues with the package already present in HW. On
 651  * success, the driver's shadow of the HW tables is filled from the package.
 652  *
 653  * Returns 0 on success, otherwise an ice_status error code.
 654  */
 655 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
 656 {
 657         struct ice_pkg_hdr *pkg;
 658         enum ice_status status;
 659         struct ice_seg *seg;
 660 
 661         if (!buf || !len)
 662                 return ICE_ERR_PARAM;
 663 
 664         pkg = (struct ice_pkg_hdr *)buf;
 665         status = ice_verify_pkg(pkg, len);
 666         if (status) {
 667                 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
 668                           status);
 669                 return status;
 670         }
 671 
 672         
 673         status = ice_init_pkg_info(hw, pkg);
 674         if (status)
 675                 return status;
 676 
 677         /* before downloading the package, check package version for
 678          * compatibility with driver
 679          */
 680         status = ice_chk_pkg_version(&hw->pkg_ver);
 681         if (status)
 682                 return status;
 683 
 684         
 685         seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
 686         if (!seg) {
 687                 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
 688                 return ICE_ERR_CFG;
 689         }
 690 
 691         
 692         status = ice_download_pkg(hw, seg);
 693         if (status == ICE_ERR_AQ_NO_WORK) {
 694                 ice_debug(hw, ICE_DBG_INIT,
 695                           "package previously loaded - no work.\n");
 696                 status = 0;
 697         }
 698 
 699         /* Get information on the package currently loaded in HW, then make
 700          * sure the driver is compatible with this version.
 701          */
 702         if (!status) {
 703                 status = ice_get_pkg_info(hw);
 704                 if (!status)
 705                         status = ice_chk_pkg_version(&hw->active_pkg_ver);
 706         }
 707 
 708         if (!status) {
 709                 hw->seg = seg;
 710                 /* on successful package download update other required
 711                  * registers to support the package and fill HW tables
 712                  * with package content
 713                  */
 714                 ice_init_pkg_regs(hw);
 715                 ice_fill_blk_tbls(hw);
 716         } else {
 717                 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
 718                           status);
 719         }
 720 
 721         return status;
 722 }
 723 
 724 
 725 
 726 
 727 
 728 
 729 
 730 
 731 
 732 
 733 /**
 734  * ice_copy_and_init_pkg - initialize/download a copy of the package
 735  * @hw: pointer to the hardware structure
 736  * @buf: pointer to the package buffer
 737  * @len: size of the package buffer
 738  *
 739  * This function copies the package buffer and then calls ice_init_pkg() to
 740  * initialize the copied package contents.
 741  *
 742  * The copy is necessary when the supplied buffer is constant or may be freed
 743  * soon after this call (for example, firmware data obtained from
 744  * request_firmware()). If the buffer is writable and persists, the caller
 745  * may use ice_init_pkg() directly instead.
 746  */
 747 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
 748 {
 749         enum ice_status status;
 750         u8 *buf_copy;
 751 
 752         if (!buf || !len)
 753                 return ICE_ERR_PARAM;
 754 
 755         buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
 756 
 757         status = ice_init_pkg(hw, buf_copy, len);
 758         if (status) {
 759                 /* Free the copy, since we failed to initialize the package */
 760                 devm_kfree(ice_hw_to_dev(hw), buf_copy);
 761         } else {
 762                 /* Track the copied pkg so we can free it later */
 763                 hw->pkg_copy = buf_copy;
 764                 hw->pkg_size = len;
 765         }
 766 
 767         return status;
 768 }
 769 
 770 
 771 
 772 /**
 773  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 774  * @hw: pointer to the hardware structure
 775  * @blk: HW block
 776  * @ptype: the ptype to search for
 777  * @ptg: pointer to variable that receives the PTG
 778  *
 779  * This function will search the PTGs for a particular ptype, returning the
 780  * PTG ID that contains it through the PTG parameter, with the value of
 781  * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 782  */
 783 static enum ice_status
 784 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
 785 {
 786         if (ptype >= ICE_XLT1_CNT || !ptg)
 787                 return ICE_ERR_PARAM;
 788 
 789         *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
 790         return 0;
 791 }
 792 
 793 /**
 794  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 795  * @hw: pointer to the hardware structure
 796  * @blk: HW block
 797  * @ptg: the PTG to allocate
 798  *
 799  * This function allocates a given packet type group ID specified by the PTG
 800  * parameter.
 801  */
 802 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
 803 {
 804         hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
 805 }
 806 
 807 /**
 808  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 809  * @hw: pointer to the hardware structure
 810  * @blk: HW block
 811  * @ptype: the ptype to remove
 812  * @ptg: the PTG to remove the ptype from
 813  *
 814  * This function will remove the ptype from the specific PTG, and move it to
 815  * the default PTG (ICE_DEFAULT_PTG).
 816  */
 817 static enum ice_status
 818 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
 819 {
 820         struct ice_ptg_ptype **ch;
 821         struct ice_ptg_ptype *p;
 822 
 823         if (ptype > ICE_XLT1_CNT - 1)
 824                 return ICE_ERR_PARAM;
 825 
 826         if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
 827                 return ICE_ERR_DOES_NOT_EXIST;
 828 
 829         /* Should not happen if .in_use is set, bad config */
 830         if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
 831                 return ICE_ERR_CFG;
 832 
 833         /* find the ptype within this PTG, and bypass the link over it */
 834         p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 835         ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 836         while (p) {
 837                 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
 838                         *ch = p->next_ptype;
 839                         break;
 840                 }
 841 
 842                 ch = &p->next_ptype;
 843                 p = p->next_ptype;
 844         }
 845 
 846         hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
 847         hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
 848 
 849         return 0;
 850 }
 851 
 852 /**
 853  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 854  * @hw: pointer to the hardware structure
 855  * @blk: HW block
 856  * @ptype: the ptype to add or move
 857  * @ptg: the PTG to add or move the ptype to
 858  *
 859  * This function will either add or move a ptype to a particular PTG depending
 860  * on if the ptype is already part of another group. Note that using a
 861  * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 862  * default PTG.
 863  */
 864 static enum ice_status
 865 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
 866 {
 867         enum ice_status status;
 868         u8 original_ptg;
 869 
 870         if (ptype > ICE_XLT1_CNT - 1)
 871                 return ICE_ERR_PARAM;
 872 
 873         if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
 874                 return ICE_ERR_DOES_NOT_EXIST;
 875 
 876         status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
 877         if (status)
 878                 return status;
 879 
 880         /* Is ptype already in the correct PTG? */
 881         if (original_ptg == ptg)
 882                 return 0;
 883 
 884         /* Remove from original PTG and move back to the default PTG */
 885         if (original_ptg != ICE_DEFAULT_PTG)
 886                 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
 887 
 888         /* Moving to default PTG? Then we're done with this request */
 889         if (ptg == ICE_DEFAULT_PTG)
 890                 return 0;
 891 
 892         /* Add ptype to PTG at the beginning of the list */
 893         hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
 894                 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 895         hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
 896                 &hw->blk[blk].xlt1.ptypes[ptype];
 897 
 898         hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
 899         hw->blk[blk].xlt1.t[ptype] = ptg;
 900 
 901         return 0;
 902 }
 903 
 904 /* Block / table size info */
 905 struct ice_blk_size_details {
 906         u16 xlt1;                       /* # XLT1 entries */
 907         u16 xlt2;                       /* # XLT2 entries */
 908         u16 prof_tcam;                  /* # profile ID TCAM entries */
 909         u16 prof_id;                    /* # profile IDs */
 910         u8 prof_cdid_bits;              /* # CDID one-hot bits used in key */
 911         u16 prof_redir;                 /* # profile redirection entries */
 912         u16 es;                         /* # extraction sequence entries */
 913         u16 fvw;                        /* # field vector words */
 914         u8 overwrite;                   /* overwrite existing table entries */
 915         u8 reverse;                     /* reverse FV order */
 916 };
 917 
 918 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
 919         /* Table Definitions
 920          * XLT1 - Number of entries in XLT1 table
 921          * XLT2 - Number of entries in XLT2 table
 922          * TCAM - Number of entries Profile ID TCAM table
 923          * PID  - Number of profile IDs
 924          * CDID - Number of CDID one-hot bits used in the TCAM key
 925          * PRED - Number of entries in the Profile Redirection Table
 926          * FV   - Number of entries in the Field Vector
 927          * FVW  - Width (in WORDs) of the Field Vector
 928          * OVR  - Overwrite existing table entries
 929          * REV  - Reverse FV
 930          */
 931         /*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
 932         /*          Overwrite   , Reverse FV */
 933         /* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
 934                     false, false },
 935         /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
 936                     false, false },
 937         /* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
 938                     false, true  },
 939         /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
 940                     true,  true  },
 941         /* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
 942                     false, false },
 943 };
 944 
 945 enum ice_sid_all {
 946         ICE_SID_XLT1_OFF = 0,
 947         ICE_SID_XLT2_OFF,
 948         ICE_SID_PR_OFF,
 949         ICE_SID_PR_REDIR_OFF,
 950         ICE_SID_ES_OFF,
 951         ICE_SID_OFF_COUNT,
 952 };
 953 
 954 
 955 
 956 /**
 957  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 958  * @hw: pointer to the hardware structure
 959  * @blk: HW block
 960  * @vsi: VSI of interest
 961  * @vsig: pointer to receive the VSI group
 962  *
 963  * This function will lookup the VSI entry in the XLT2 list and return
 964  * the VSI group it is associated with.
 965  */
 966 static enum ice_status
 967 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
 968 {
 969         if (!vsig || vsi >= ICE_MAX_VSI)
 970                 return ICE_ERR_PARAM;
 971 
 972         /* As long as there's a default or valid VSIG associated with the input
 973          * VSI, the function returns success. Any handling of VSIG will be done
 974          * by the following add, update or remove functions.
 975          */
 976         *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
 977 
 978         return 0;
 979 }
 980 
 981 /**
 982  * ice_vsig_alloc_val - allocate a new VSIG by value
 983  * @hw: pointer to the hardware structure
 984  * @blk: HW block
 985  * @vsig: the VSIG to allocate
 986  *
 987  * This function will allocate a given VSIG specified by the VSIG parameter.
 988  */
 989 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
 990 {
 991         u16 idx = vsig & ICE_VSIG_IDX_M;
 992 
 993         if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
 994                 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
 995                 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
 996         }
 997 
 998         return ICE_VSIG_VALUE(idx, hw->pf_id);
 999 }
1000 
1001 /**
1002  * ice_vsig_remove_vsi - remove VSI from VSIG
1003  * @hw: pointer to the hardware structure
1004  * @blk: HW block
1005  * @vsi: VSI to remove
1006  * @vsig: VSI group to remove from
1007  *
1008  * This function will remove the input VSI from its VSI group and move it
1009  * to the DEFAULT_VSIG.
1010  */
1011 static enum ice_status
1012 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1013 {
1014         struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
1015         u16 idx;
1016 
1017         idx = vsig & ICE_VSIG_IDX_M;
1018 
1019         if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1020                 return ICE_ERR_PARAM;
1021 
1022         if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1023                 return ICE_ERR_DOES_NOT_EXIST;
1024 
1025         /* entry already in default VSIG, don't have to remove */
1026         if (idx == ICE_DEFAULT_VSIG)
1027                 return 0;
1028 
1029         vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1030         if (!(*vsi_head))
1031                 return ICE_ERR_CFG;
1032 
1033         vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1034         vsi_cur = (*vsi_head);
1035 
1036         /* remove VSI from VSIG VSI list */
1037         while (vsi_cur) {
1038                 if (vsi_tgt == vsi_cur) {
1039                         (*vsi_head) = vsi_cur->next_vsi;
1040                         break;
1041                 }
1042                 vsi_head = &vsi_cur->next_vsi;
1043                 vsi_cur = vsi_cur->next_vsi;
1044         }
1045 
1046         /* verify if VSI was removed from group list */
1047         if (!vsi_cur)
1048                 return ICE_ERR_DOES_NOT_EXIST;
1049 
1050         vsi_cur->vsig = ICE_DEFAULT_VSIG;
1051         vsi_cur->changed = 1;
1052         vsi_cur->next_vsi = NULL;
1053 
1054         return 0;
1055 }
1056 
1057 /**
1058  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
1059  * @hw: pointer to the hardware structure
1060  * @blk: HW block
1061  * @vsi: VSI to move
1062  * @vsig: destination VSI group
1063  *
1064  * This function will move or add the input VSI to the target VSIG.
1065  * The function will find the original VSIG the VSI belongs to, remove
1066  * the entry from that group, and then add the VSI to the new VSIG.
1067  * Moving to the default VSIG only removes the VSI from its current group.
1068  */
1069 static enum ice_status
1070 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1071 {
1072         struct ice_vsig_vsi *tmp;
1073         enum ice_status status;
1074         u16 orig_vsig, idx;
1075 
1076         idx = vsig & ICE_VSIG_IDX_M;
1077 
1078         if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1079                 return ICE_ERR_PARAM;
1080 
1081         /* if VSIG not in use and VSIG is not default type this VSIG
1082          * doesn't exist.
1083          */
1084         if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1085             vsig != ICE_DEFAULT_VSIG)
1086                 return ICE_ERR_DOES_NOT_EXIST;
1087 
1088         status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1089         if (status)
1090                 return status;
1091 
1092         /* no update required if VSIGs match */
1093         if (orig_vsig == vsig)
1094                 return 0;
1095 
1096         if (orig_vsig != ICE_DEFAULT_VSIG) {
1097                 /* remove entry from original VSIG; this moves it to the default VSIG */
1098                 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1099                 if (status)
1100                         return status;
1101         }
1102 
1103         if (idx == ICE_DEFAULT_VSIG)
1104                 return 0;
1105 
1106         /* move VSI to the new VSIG that it's associated with */
1107         hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1108         hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1109 
1110         /* add to new VSIG list */
1111         tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1112         hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1113                 &hw->blk[blk].xlt2.vsis[vsi];
1114         hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1115         hw->blk[blk].xlt2.t[vsi] = vsig;
1116 
1117         return 0;
1118 }
1119 
1120 /* Block / table section IDs */
1121 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
1122         
1123         {       ICE_SID_XLT1_SW,
1124                 ICE_SID_XLT2_SW,
1125                 ICE_SID_PROFID_TCAM_SW,
1126                 ICE_SID_PROFID_REDIR_SW,
1127                 ICE_SID_FLD_VEC_SW
1128         },
1129 
1130         
1131         {       ICE_SID_XLT1_ACL,
1132                 ICE_SID_XLT2_ACL,
1133                 ICE_SID_PROFID_TCAM_ACL,
1134                 ICE_SID_PROFID_REDIR_ACL,
1135                 ICE_SID_FLD_VEC_ACL
1136         },
1137 
1138         
1139         {       ICE_SID_XLT1_FD,
1140                 ICE_SID_XLT2_FD,
1141                 ICE_SID_PROFID_TCAM_FD,
1142                 ICE_SID_PROFID_REDIR_FD,
1143                 ICE_SID_FLD_VEC_FD
1144         },
1145 
1146         
1147         {       ICE_SID_XLT1_RSS,
1148                 ICE_SID_XLT2_RSS,
1149                 ICE_SID_PROFID_TCAM_RSS,
1150                 ICE_SID_PROFID_REDIR_RSS,
1151                 ICE_SID_FLD_VEC_RSS
1152         },
1153 
1154         
1155         {       ICE_SID_XLT1_PE,
1156                 ICE_SID_XLT2_PE,
1157                 ICE_SID_PROFID_TCAM_PE,
1158                 ICE_SID_PROFID_REDIR_PE,
1159                 ICE_SID_FLD_VEC_PE
1160         }
1161 };
1162 
1163 /**
1164  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
1165  * @hw: pointer to the hardware structure
1166  * @blk: the HW block to initialize
1167  */
1168 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1169 {
1170         u16 pt;
1171 
1172         for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1173                 u8 ptg;
1174 
1175                 ptg = hw->blk[blk].xlt1.t[pt];
1176                 if (ptg != ICE_DEFAULT_PTG) {
1177                         ice_ptg_alloc_val(hw, blk, ptg);
1178                         ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1179                 }
1180         }
1181 }
1182 
1183 /**
1184  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
1185  * @hw: pointer to the hardware structure
1186  * @blk: the HW block to initialize
1187  */
1188 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1189 {
1190         u16 vsi;
1191 
1192         for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1193                 u16 vsig;
1194 
1195                 vsig = hw->blk[blk].xlt2.t[vsi];
1196                 if (vsig) {
1197                         ice_vsig_alloc_val(hw, blk, vsig);
1198                         ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1199                         /* no changes at this time, since this has been
1200                          * initialized from the original package
1201                          */
1202                         hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1203                 }
1204         }
1205 }
1206 
1207 /**
1208  * ice_init_sw_db - init software database from HW tables
1209  * @hw: pointer to the hardware structure
1210  */
1211 static void ice_init_sw_db(struct ice_hw *hw)
1212 {
1213         u16 i;
1214 
1215         for (i = 0; i < ICE_BLK_COUNT; i++) {
1216                 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
1217                 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1218         }
1219 }
1220 
1221 /**
1222  * ice_fill_tbl - Reads content of a single table type into database
1223  * @hw: pointer to the hardware structure
1224  * @block_id: Block ID of the table to copy
1225  * @sid: Section ID of the table to copy
1226  *
1227  * Will attempt to read the entire content of a given table of a single block
1228  * into the driver database. We assume that the buffer will always
1229  * be as large or larger than the data contained in the package. If
1230  * this condition is not met, there is most likely an error in the package
1231  * contents.
1232  */
1233 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
1234 {
1235         u32 dst_len, sect_len, offset = 0;
1236         struct ice_prof_redir_section *pr;
1237         struct ice_prof_id_section *pid;
1238         struct ice_xlt1_section *xlt1;
1239         struct ice_xlt2_section *xlt2;
1240         struct ice_sw_fv_section *es;
1241         struct ice_pkg_enum state;
1242         u8 *src, *dst;
1243         void *sect;
1244 
1245         /* if the HW segment pointer is null then the first iteration of
1246          * ice_pkg_enum_section() will fail. In this case the HW tables will
1247          * not be filled and return success.
1248          */
1249         if (!hw->seg) {
1250                 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
1251                 return;
1252         }
1253 
1254         memset(&state, 0, sizeof(state));
1255 
1256         sect = ice_pkg_enum_section(hw->seg, &state, sid);
1257 
1258         while (sect) {
1259                 switch (sid) {
1260                 case ICE_SID_XLT1_SW:
1261                 case ICE_SID_XLT1_FD:
1262                 case ICE_SID_XLT1_RSS:
1263                 case ICE_SID_XLT1_ACL:
1264                 case ICE_SID_XLT1_PE:
1265                         xlt1 = (struct ice_xlt1_section *)sect;
1266                         src = xlt1->value;
1267                         sect_len = le16_to_cpu(xlt1->count) *
1268                                 sizeof(*hw->blk[block_id].xlt1.t);
1269                         dst = hw->blk[block_id].xlt1.t;
1270                         dst_len = hw->blk[block_id].xlt1.count *
1271                                 sizeof(*hw->blk[block_id].xlt1.t);
1272                         break;
1273                 case ICE_SID_XLT2_SW:
1274                 case ICE_SID_XLT2_FD:
1275                 case ICE_SID_XLT2_RSS:
1276                 case ICE_SID_XLT2_ACL:
1277                 case ICE_SID_XLT2_PE:
1278                         xlt2 = (struct ice_xlt2_section *)sect;
1279                         src = (__force u8 *)xlt2->value;
1280                         sect_len = le16_to_cpu(xlt2->count) *
1281                                 sizeof(*hw->blk[block_id].xlt2.t);
1282                         dst = (u8 *)hw->blk[block_id].xlt2.t;
1283                         dst_len = hw->blk[block_id].xlt2.count *
1284                                 sizeof(*hw->blk[block_id].xlt2.t);
1285                         break;
1286                 case ICE_SID_PROFID_TCAM_SW:
1287                 case ICE_SID_PROFID_TCAM_FD:
1288                 case ICE_SID_PROFID_TCAM_RSS:
1289                 case ICE_SID_PROFID_TCAM_ACL:
1290                 case ICE_SID_PROFID_TCAM_PE:
1291                         pid = (struct ice_prof_id_section *)sect;
1292                         src = (u8 *)pid->entry;
1293                         sect_len = le16_to_cpu(pid->count) *
1294                                 sizeof(*hw->blk[block_id].prof.t);
1295                         dst = (u8 *)hw->blk[block_id].prof.t;
1296                         dst_len = hw->blk[block_id].prof.count *
1297                                 sizeof(*hw->blk[block_id].prof.t);
1298                         break;
1299                 case ICE_SID_PROFID_REDIR_SW:
1300                 case ICE_SID_PROFID_REDIR_FD:
1301                 case ICE_SID_PROFID_REDIR_RSS:
1302                 case ICE_SID_PROFID_REDIR_ACL:
1303                 case ICE_SID_PROFID_REDIR_PE:
1304                         pr = (struct ice_prof_redir_section *)sect;
1305                         src = pr->redir_value;
1306                         sect_len = le16_to_cpu(pr->count) *
1307                                 sizeof(*hw->blk[block_id].prof_redir.t);
1308                         dst = hw->blk[block_id].prof_redir.t;
1309                         dst_len = hw->blk[block_id].prof_redir.count *
1310                                 sizeof(*hw->blk[block_id].prof_redir.t);
1311                         break;
1312                 case ICE_SID_FLD_VEC_SW:
1313                 case ICE_SID_FLD_VEC_FD:
1314                 case ICE_SID_FLD_VEC_RSS:
1315                 case ICE_SID_FLD_VEC_ACL:
1316                 case ICE_SID_FLD_VEC_PE:
1317                         es = (struct ice_sw_fv_section *)sect;
1318                         src = (u8 *)es->fv;
1319                         sect_len = (u32)(le16_to_cpu(es->count) *
1320                                          hw->blk[block_id].es.fvw) *
1321                                 sizeof(*hw->blk[block_id].es.t);
1322                         dst = (u8 *)hw->blk[block_id].es.t;
1323                         dst_len = (u32)(hw->blk[block_id].es.count *
1324                                         hw->blk[block_id].es.fvw) *
1325                                 sizeof(*hw->blk[block_id].es.t);
1326                         break;
1327                 default:
1328                         return;
1329                 }
1330 
1331                 /* if the section offset exceeds destination length, terminate
1332                  * table fill
1333                  */
1334                 if (offset > dst_len)
1335                         return;
1336 
1337                 /* if the sum of section size and offset exceeds destination size
1338                  * then we are out of bounds of the HW table size for that PF.
1339                  * Change section length to fill the remaining table space
1340                  * of that PF.
1341                  */
1342                 if ((offset + sect_len) > dst_len)
1343                         sect_len = dst_len - offset;
1344 
1345                 memcpy(dst + offset, src, sect_len);
1346                 offset += sect_len;
1347                 sect = ice_pkg_enum_section(NULL, &state, sid);
1348         }
1349 }
1350 
1351 /**
1352  * ice_fill_blk_tbls - Read package context for tables
1353  * @hw: pointer to the hardware structure
1354  *
1355  * Reads the current package contents and populates the driver
1356  * database with the data iteratively for all advanced feature
1357  * blocks. Assume that the HW tables have been allocated.
1358  */
1359 void ice_fill_blk_tbls(struct ice_hw *hw)
1360 {
1361         u8 i;
1362 
1363         for (i = 0; i < ICE_BLK_COUNT; i++) {
1364                 enum ice_block blk_id = (enum ice_block)i;
1365 
1366                 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
1367                 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
1368                 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
1369                 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
1370                 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
1371         }
1372 
1373         ice_init_sw_db(hw);
1374 }
1375 
1376 /**
1377  * ice_free_hw_tbls - free hardware table memory
1378  * @hw: pointer to the hardware structure
1379  */
1380 void ice_free_hw_tbls(struct ice_hw *hw)
1381 {
1382         u8 i;
1383 
1384         for (i = 0; i < ICE_BLK_COUNT; i++) {
1385                 hw->blk[i].is_list_init = false;
1386 
1387                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
1388                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
1389                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
1390                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
1391                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
1392                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
1393                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
1394                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
1395                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
1396                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
1397                 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
1398         }
1399 
1400         memset(hw->blk, 0, sizeof(hw->blk));
1401 }
1402 
1403 /**
1404  * ice_clear_hw_tbls - clear HW tables
1405  * @hw: pointer to the hardware structure
1406  */
1407 void ice_clear_hw_tbls(struct ice_hw *hw)
1408 {
1409         u8 i;
1410 
1411         for (i = 0; i < ICE_BLK_COUNT; i++) {
1412                 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1413                 struct ice_prof_tcam *prof = &hw->blk[i].prof;
1414                 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1415                 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1416                 struct ice_es *es = &hw->blk[i].es;
1417 
1418                 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
1419                 memset(xlt1->ptg_tbl, 0,
1420                        ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
1421                 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
1422 
1423                 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
1424                 memset(xlt2->vsig_tbl, 0,
1425                        xlt2->count * sizeof(*xlt2->vsig_tbl));
1426                 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
1427 
1428                 memset(prof->t, 0, prof->count * sizeof(*prof->t));
1429                 memset(prof_redir->t, 0,
1430                        prof_redir->count * sizeof(*prof_redir->t));
1431 
1432                 memset(es->t, 0, es->count * sizeof(*es->t));
1433                 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
1434                 memset(es->written, 0, es->count * sizeof(*es->written));
1435         }
1436 }
1437 
1438 /**
1439  * ice_init_hw_tbls - init hardware table memory
1440  * @hw: pointer to the hardware structure
1441  */
1442 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
1443 {
1444         u8 i;
1445 
1446         for (i = 0; i < ICE_BLK_COUNT; i++) {
1447                 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1448                 struct ice_prof_tcam *prof = &hw->blk[i].prof;
1449                 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1450                 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1451                 struct ice_es *es = &hw->blk[i].es;
1452                 u16 j;
1453 
1454                 if (hw->blk[i].is_list_init)
1455                         continue;
1456 
1457                 hw->blk[i].is_list_init = true;
1458 
1459                 hw->blk[i].overwrite = blk_sizes[i].overwrite;
1460                 es->reverse = blk_sizes[i].reverse;
1461 
1462                 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
1463                 xlt1->count = blk_sizes[i].xlt1;
1464 
1465                 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
1466                                             sizeof(*xlt1->ptypes), GFP_KERNEL);
1467 
1468                 if (!xlt1->ptypes)
1469                         goto err;
1470 
1471                 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
1472                                              sizeof(*xlt1->ptg_tbl),
1473                                              GFP_KERNEL);
1474 
1475                 if (!xlt1->ptg_tbl)
1476                         goto err;
1477 
1478                 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
1479                                        sizeof(*xlt1->t), GFP_KERNEL);
1480                 if (!xlt1->t)
1481                         goto err;
1482 
1483                 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
1484                 xlt2->count = blk_sizes[i].xlt2;
1485 
1486                 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1487                                           sizeof(*xlt2->vsis), GFP_KERNEL);
1488 
1489                 if (!xlt2->vsis)
1490                         goto err;
1491 
1492                 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1493                                               sizeof(*xlt2->vsig_tbl),
1494                                               GFP_KERNEL);
1495                 if (!xlt2->vsig_tbl)
1496                         goto err;
1497 
1498                 for (j = 0; j < xlt2->count; j++)
1499                         INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
1500 
1501                 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1502                                        sizeof(*xlt2->t), GFP_KERNEL);
1503                 if (!xlt2->t)
1504                         goto err;
1505 
1506                 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
1507                 prof->count = blk_sizes[i].prof_tcam;
1508                 prof->max_prof_id = blk_sizes[i].prof_id;
1509                 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
1510                 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
1511                                        sizeof(*prof->t), GFP_KERNEL);
1512 
1513                 if (!prof->t)
1514                         goto err;
1515 
1516                 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
1517                 prof_redir->count = blk_sizes[i].prof_redir;
1518                 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
1519                                              prof_redir->count,
1520                                              sizeof(*prof_redir->t),
1521                                              GFP_KERNEL);
1522 
1523                 if (!prof_redir->t)
1524                         goto err;
1525 
1526                 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
1527                 es->count = blk_sizes[i].es;
1528                 es->fvw = blk_sizes[i].fvw;
1529                 es->t = devm_kcalloc(ice_hw_to_dev(hw),
1530                                      (u32)(es->count * es->fvw),
1531                                      sizeof(*es->t), GFP_KERNEL);
1532                 if (!es->t)
1533                         goto err;
1534 
1535                 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
1536                                              sizeof(*es->ref_count),
1537                                              GFP_KERNEL);
1538 
1539                 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
1540                                            sizeof(*es->written), GFP_KERNEL);
1541                 if (!es->ref_count || !es->written)
1542                         goto err;
1543         }
1544         return 0;
1545 
1546 err:
1547         ice_free_hw_tbls(hw);
1548         return ICE_ERR_NO_MEMORY;
1549 }