root/drivers/net/ethernet/intel/ice/ice_txrx.c


DEFINITIONS

This source file includes the following definitions.
  1. ice_unmap_and_free_tx_buf
  2. txring_txq
  3. ice_clean_tx_ring
  4. ice_free_tx_ring
  5. ice_clean_tx_irq
  6. ice_setup_tx_ring
  7. ice_clean_rx_ring
  8. ice_free_rx_ring
  9. ice_setup_rx_ring
  10. ice_release_rx_desc
  11. ice_alloc_mapped_page
  12. ice_alloc_rx_bufs
  13. ice_page_is_reserved
  14. ice_rx_buf_adjust_pg_offset
  15. ice_can_reuse_rx_page
  16. ice_add_rx_frag
  17. ice_reuse_rx_page
  18. ice_get_rx_buf
  19. ice_construct_skb
  20. ice_put_rx_buf
  21. ice_cleanup_headers
  22. ice_test_staterr
  23. ice_is_non_eop
  24. ice_ptype_to_htype
  25. ice_rx_hash
  26. ice_rx_csum
  27. ice_process_skb_fields
  28. ice_receive_skb
  29. ice_clean_rx_irq
  30. ice_adjust_itr_by_size_and_speed
  31. ice_update_itr
  32. ice_buildreg_itr
  33. ice_update_ena_itr
  34. ice_set_wb_on_itr
  35. ice_napi_poll
  36. build_ctob
  37. __ice_maybe_stop_tx
  38. ice_maybe_stop_tx
  39. ice_tx_map
  40. ice_tx_csum
  41. ice_tx_prepare_vlan_flags
  42. ice_tso
  43. ice_txd_use_count
  44. ice_xmit_desc_count
  45. __ice_chk_linearize
  46. ice_chk_linearize
  47. ice_xmit_frame_ring
  48. ice_start_xmit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright (c) 2018, Intel Corporation. */
   3 
   4 /* The driver transmit and receive code */
   5 
   6 #include <linux/prefetch.h>
   7 #include <linux/mm.h>
   8 #include "ice.h"
   9 #include "ice_dcb_lib.h"
  10 
  11 #define ICE_RX_HDR_SIZE         256
  12 
  13 /**
  14  * ice_unmap_and_free_tx_buf - Release a Tx buffer
  15  * @ring: the ring that owns the buffer
  16  * @tx_buf: the buffer to free
  17  */
  18 static void
  19 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
  20 {
  21         if (tx_buf->skb) {
  22                 dev_kfree_skb_any(tx_buf->skb);
  23                 if (dma_unmap_len(tx_buf, len))
  24                         dma_unmap_single(ring->dev,
  25                                          dma_unmap_addr(tx_buf, dma),
  26                                          dma_unmap_len(tx_buf, len),
  27                                          DMA_TO_DEVICE);
  28         } else if (dma_unmap_len(tx_buf, len)) {
  29                 dma_unmap_page(ring->dev,
  30                                dma_unmap_addr(tx_buf, dma),
  31                                dma_unmap_len(tx_buf, len),
  32                                DMA_TO_DEVICE);
  33         }
  34 
  35         tx_buf->next_to_watch = NULL;
  36         tx_buf->skb = NULL;
  37         dma_unmap_len_set(tx_buf, len, 0);
  38         /* tx_buf must be completely set up in the transmit path */
  39 }
  40 
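      /**
       * txring_txq - Find the netdev Tx queue backing this ice Tx ring
       * @ring: Tx ring to find the netdev queue for
       */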
  41 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
  42 {
  43         return netdev_get_tx_queue(ring->netdev, ring->q_index);
  44 }
  45 
  46 /**
  47  * ice_clean_tx_ring - Free any empty Tx buffers
  48  * @tx_ring: ring to be cleaned
  49  */
  50 void ice_clean_tx_ring(struct ice_ring *tx_ring)
  51 {
  52         u16 i;
  53 
  54         /* ring already cleared, nothing to do */
  55         if (!tx_ring->tx_buf)
  56                 return;
  57 
  58         /* Free all the Tx ring sk_buffs */
  59         for (i = 0; i < tx_ring->count; i++)
  60                 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
  61 
  62         memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
  63 
  64         /* Zero out the descriptor ring */
  65         memset(tx_ring->desc, 0, tx_ring->size);
  66 
  67         tx_ring->next_to_use = 0;
  68         tx_ring->next_to_clean = 0;
  69 
  70         if (!tx_ring->netdev)
  71                 return;
  72 
  73         /* cleanup Tx queue statistics */
  74         netdev_tx_reset_queue(txring_txq(tx_ring));
  75 }
  76 
  77 /**
  78  * ice_free_tx_ring - Free Tx resources per queue
  79  * @tx_ring: Tx descriptor ring for a specific queue
  80  *
  81  * Free all transmit software resources
  82  */
  83 void ice_free_tx_ring(struct ice_ring *tx_ring)
  84 {
  85         ice_clean_tx_ring(tx_ring);
  86         devm_kfree(tx_ring->dev, tx_ring->tx_buf);
  87         tx_ring->tx_buf = NULL;
  88 
  89         if (tx_ring->desc) {
  90                 dmam_free_coherent(tx_ring->dev, tx_ring->size,
  91                                    tx_ring->desc, tx_ring->dma);
  92                 tx_ring->desc = NULL;
  93         }
  94 }
  95 
  96 /**
  97  * ice_clean_tx_irq - Reclaim resources after transmit completes
  98  * @tx_ring: Tx ring to clean
  99  * @napi_budget: Used to determine if we are in netpoll
 100  *
 101  * Returns true if there's any budget left (i.e. the clean is finished)
 102  */
 103 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 104 {
 105         unsigned int total_bytes = 0, total_pkts = 0;
 106         unsigned int budget = ICE_DFLT_IRQ_WORK;
 107         struct ice_vsi *vsi = tx_ring->vsi;
 108         s16 i = tx_ring->next_to_clean;
 109         struct ice_tx_desc *tx_desc;
 110         struct ice_tx_buf *tx_buf;
 111 
 112         tx_buf = &tx_ring->tx_buf[i];
 113         tx_desc = ICE_TX_DESC(tx_ring, i);
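              /* run the clean index as a negative offset from the end of the
               * ring so the wrap checks below reduce to a test against zero
               */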
 114         i -= tx_ring->count;
 115 
 116         prefetch(&vsi->state);
 117 
 118         do {
 119                 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
 120 
 121                 /* if next_to_watch is not set then there is no work pending */
 122                 if (!eop_desc)
 123                         break;
 124 
 125                 smp_rmb();      /* prevent any other reads prior to eop_desc */
 126 
 127                 /* if the descriptor isn't done, no work yet to do */
 128                 if (!(eop_desc->cmd_type_offset_bsz &
 129                       cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 130                         break;
 131 
 132                 /* clear next_to_watch to prevent false hangs */
 133                 tx_buf->next_to_watch = NULL;
 134 
 135                 /* update the statistics for this packet */
 136                 total_bytes += tx_buf->bytecount;
 137                 total_pkts += tx_buf->gso_segs;
 138 
 139                 /* free the skb */
 140                 napi_consume_skb(tx_buf->skb, napi_budget);
 141 
 142                 /* unmap skb header data */
 143                 dma_unmap_single(tx_ring->dev,
 144                                  dma_unmap_addr(tx_buf, dma),
 145                                  dma_unmap_len(tx_buf, len),
 146                                  DMA_TO_DEVICE);
 147 
 148                 /* clear tx_buf data */
 149                 tx_buf->skb = NULL;
 150                 dma_unmap_len_set(tx_buf, len, 0);
 151 
 152                 /* unmap remaining buffers */
 153                 while (tx_desc != eop_desc) {
 154                         tx_buf++;
 155                         tx_desc++;
 156                         i++;
 157                         if (unlikely(!i)) {
 158                                 i -= tx_ring->count;
 159                                 tx_buf = tx_ring->tx_buf;
 160                                 tx_desc = ICE_TX_DESC(tx_ring, 0);
 161                         }
 162 
 163                         /* unmap any remaining paged data */
 164                         if (dma_unmap_len(tx_buf, len)) {
 165                                 dma_unmap_page(tx_ring->dev,
 166                                                dma_unmap_addr(tx_buf, dma),
 167                                                dma_unmap_len(tx_buf, len),
 168                                                DMA_TO_DEVICE);
 169                                 dma_unmap_len_set(tx_buf, len, 0);
 170                         }
 171                 }
 172 
 173                 /* move us one more past the eop_desc for start of next pkt */
 174                 tx_buf++;
 175                 tx_desc++;
 176                 i++;
 177                 if (unlikely(!i)) {
 178                         i -= tx_ring->count;
 179                         tx_buf = tx_ring->tx_buf;
 180                         tx_desc = ICE_TX_DESC(tx_ring, 0);
 181                 }
 182 
 183                 prefetch(tx_desc);
 184 
 185                 /* update budget accounting */
 186                 budget--;
 187         } while (likely(budget));
 188 
 189         i += tx_ring->count;
 190         tx_ring->next_to_clean = i;
 191         u64_stats_update_begin(&tx_ring->syncp);
 192         tx_ring->stats.bytes += total_bytes;
 193         tx_ring->stats.pkts += total_pkts;
 194         u64_stats_update_end(&tx_ring->syncp);
 195         tx_ring->q_vector->tx.total_bytes += total_bytes;
 196         tx_ring->q_vector->tx.total_pkts += total_pkts;
 197 
 198         netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
 199                                   total_bytes);
 200 
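      /* Restart a stopped queue only once roughly two worst-case frames'
       * worth of descriptors (DESC_NEEDED each) are free, so the queue is
       * not stopped again right after it is woken.
       */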
 201 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
 202         if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
 203                      (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 204                 /* Make sure that anybody stopping the queue after this
 205                  * sees the new next_to_clean.
 206                  */
 207                 smp_mb();
 208                 if (__netif_subqueue_stopped(tx_ring->netdev,
 209                                              tx_ring->q_index) &&
 210                     !test_bit(__ICE_DOWN, vsi->state)) {
 211                         netif_wake_subqueue(tx_ring->netdev,
 212                                             tx_ring->q_index);
 213                         ++tx_ring->tx_stats.restart_q;
 214                 }
 215         }
 216 
 217         return !!budget;
 218 }
 219 
 220 /**
 221  * ice_setup_tx_ring - Allocate the Tx descriptors
 222  * @tx_ring: the Tx ring to set up
 223  *
 224  * Return 0 on success, negative on error
 225  */
 226 int ice_setup_tx_ring(struct ice_ring *tx_ring)
 227 {
 228         struct device *dev = tx_ring->dev;
 229 
 230         if (!dev)
 231                 return -ENOMEM;
 232 
 233         /* warn if we are about to overwrite the pointer */
 234         WARN_ON(tx_ring->tx_buf);
 235         tx_ring->tx_buf =
 236                 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
 237                              GFP_KERNEL);
 238         if (!tx_ring->tx_buf)
 239                 return -ENOMEM;
 240 
 241         /* round up to nearest page */
 242         tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 243                               PAGE_SIZE);
 244         tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 245                                             GFP_KERNEL);
 246         if (!tx_ring->desc) {
 247                 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 248                         tx_ring->size);
 249                 goto err;
 250         }
 251 
 252         tx_ring->next_to_use = 0;
 253         tx_ring->next_to_clean = 0;
 254         tx_ring->tx_stats.prev_pkt = -1;
 255         return 0;
 256 
 257 err:
 258         devm_kfree(dev, tx_ring->tx_buf);
 259         tx_ring->tx_buf = NULL;
 260         return -ENOMEM;
 261 }
 262 
 263 /**
 264  * ice_clean_rx_ring - Free Rx buffers
 265  * @rx_ring: ring to be cleaned
 266  */
 267 void ice_clean_rx_ring(struct ice_ring *rx_ring)
 268 {
 269         struct device *dev = rx_ring->dev;
 270         u16 i;
 271 
 272         /* ring already cleared, nothing to do */
 273         if (!rx_ring->rx_buf)
 274                 return;
 275 
 276         /* Free all the Rx ring sk_buffs */
 277         for (i = 0; i < rx_ring->count; i++) {
 278                 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
 279 
 280                 if (rx_buf->skb) {
 281                         dev_kfree_skb(rx_buf->skb);
 282                         rx_buf->skb = NULL;
 283                 }
 284                 if (!rx_buf->page)
 285                         continue;
 286 
 287                 /* Invalidate cache lines that may have been written to by
 288                  * device so that we avoid corrupting memory.
 289                  */
 290                 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
 291                                               rx_buf->page_offset,
 292                                               ICE_RXBUF_2048, DMA_FROM_DEVICE);
 293 
 294                 /* free resources associated with mapping */
 295                 dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
 296                                      DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 297                 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 298 
 299                 rx_buf->page = NULL;
 300                 rx_buf->page_offset = 0;
 301         }
 302 
 303         memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
 304 
 305         /* Zero out the descriptor ring */
 306         memset(rx_ring->desc, 0, rx_ring->size);
 307 
 308         rx_ring->next_to_alloc = 0;
 309         rx_ring->next_to_clean = 0;
 310         rx_ring->next_to_use = 0;
 311 }
 312 
 313 /**
 314  * ice_free_rx_ring - Free Rx resources
 315  * @rx_ring: ring to clean the resources from
 316  *
 317  * Free all receive software resources
 318  */
 319 void ice_free_rx_ring(struct ice_ring *rx_ring)
 320 {
 321         ice_clean_rx_ring(rx_ring);
 322         devm_kfree(rx_ring->dev, rx_ring->rx_buf);
 323         rx_ring->rx_buf = NULL;
 324 
 325         if (rx_ring->desc) {
 326                 dmam_free_coherent(rx_ring->dev, rx_ring->size,
 327                                    rx_ring->desc, rx_ring->dma);
 328                 rx_ring->desc = NULL;
 329         }
 330 }
 331 
 332 /**
 333  * ice_setup_rx_ring - Allocate the Rx descriptors
 334  * @rx_ring: the Rx ring to set up
 335  *
 336  * Return 0 on success, negative on error
 337  */
 338 int ice_setup_rx_ring(struct ice_ring *rx_ring)
 339 {
 340         struct device *dev = rx_ring->dev;
 341 
 342         if (!dev)
 343                 return -ENOMEM;
 344 
 345         /* warn if we are about to overwrite the pointer */
 346         WARN_ON(rx_ring->rx_buf);
 347         rx_ring->rx_buf =
 348                 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
 349                              GFP_KERNEL);
 350         if (!rx_ring->rx_buf)
 351                 return -ENOMEM;
 352 
 353         /* round up to nearest page */
 354         rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 355                               PAGE_SIZE);
 356         rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 357                                             GFP_KERNEL);
 358         if (!rx_ring->desc) {
 359                 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 360                         rx_ring->size);
 361                 goto err;
 362         }
 363 
 364         rx_ring->next_to_use = 0;
 365         rx_ring->next_to_clean = 0;
 366         return 0;
 367 
 368 err:
 369         devm_kfree(dev, rx_ring->rx_buf);
 370         rx_ring->rx_buf = NULL;
 371         return -ENOMEM;
 372 }
 373 
 374 /**
 375  * ice_release_rx_desc - Store the new tail and head values
 376  * @rx_ring: ring to bump
 377  * @val: new head index
 378  */
 379 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 380 {
 381         u16 prev_ntu = rx_ring->next_to_use;
 382 
 383         rx_ring->next_to_use = val;
 384 
 385         /* update next to alloc since we have filled the ring */
 386         rx_ring->next_to_alloc = val;
 387 
 388         /* QRX_TAIL will be updated with any tail value, but hardware ignores
 389          * the lower 3 bits. This makes it so we only bump tail on meaningful
 390          * boundaries. Also, this allows us to bump tail on intervals of 8 up to
 391          * the budget depending on the current traffic load.
 392          */
 393         val &= ~0x7;
 394         if (prev_ntu != val) {
 395                 /* Force memory writes to complete before letting h/w
 396                  * know there are new descriptors to fetch. (Only
 397                  * applicable for weak-ordered memory model archs,
 398                  * such as IA-64).
 399                  */
 400                 wmb();
 401                 writel(val, rx_ring->tail);
 402         }
 403 }
 404 
 405 /**
 406  * ice_alloc_mapped_page - recycle or make a new page
 407  * @rx_ring: ring to use
 408  * @bi: rx_buf struct to modify
 409  *
 410  * Returns true if the page was successfully allocated or
 411  * reused.
 412  */
 413 static bool
 414 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 415 {
 416         struct page *page = bi->page;
 417         dma_addr_t dma;
 418 
 419         /* since we are recycling buffers we should seldom need to alloc */
 420         if (likely(page)) {
 421                 rx_ring->rx_stats.page_reuse_count++;
 422                 return true;
 423         }
 424 
 425         /* alloc new page for storage */
 426         page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 427         if (unlikely(!page)) {
 428                 rx_ring->rx_stats.alloc_page_failed++;
 429                 return false;
 430         }
 431 
 432         /* map page for use */
 433         dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
 434                                  DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 435 
 436         /* if mapping failed free memory back to system since
 437          * there isn't much point in holding memory we can't use
 438          */
 439         if (dma_mapping_error(rx_ring->dev, dma)) {
 440                 __free_pages(page, 0);
 441                 rx_ring->rx_stats.alloc_page_failed++;
 442                 return false;
 443         }
 444 
 445         bi->dma = dma;
 446         bi->page = page;
 447         bi->page_offset = 0;
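              /* take one large page reference up front and track usage with
               * pagecnt_bias so the hot path can hand buffers to the stack
               * without an atomic refcount update per packet; the bias is
               * restocked in ice_can_reuse_rx_page()
               */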
 448         page_ref_add(page, USHRT_MAX - 1);
 449         bi->pagecnt_bias = USHRT_MAX;
 450 
 451         return true;
 452 }
 453 
 454 /**
 455  * ice_alloc_rx_bufs - Replace used receive buffers
 456  * @rx_ring: ring to place buffers on
 457  * @cleaned_count: number of buffers to replace
 458  *
 459  * Returns false if all allocations were successful, true if any fail. Returning
 460  * true signals to the caller that we didn't replace cleaned_count buffers and
 461  * there is more work to do.
 462  *
 463  * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 464  * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 465  * multiple tail writes per call.
 466  */
 467 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
 468 {
 469         union ice_32b_rx_flex_desc *rx_desc;
 470         u16 ntu = rx_ring->next_to_use;
 471         struct ice_rx_buf *bi;
 472 
 473         /* do nothing if no valid netdev defined */
 474         if (!rx_ring->netdev || !cleaned_count)
 475                 return false;
 476 
 477         /* get the Rx descriptor and buffer based on next_to_use */
 478         rx_desc = ICE_RX_DESC(rx_ring, ntu);
 479         bi = &rx_ring->rx_buf[ntu];
 480 
 481         do {
 482                 /* if we fail here, we have work remaining */
 483                 if (!ice_alloc_mapped_page(rx_ring, bi))
 484                         break;
 485 
 486                 /* sync the buffer for use by the device */
 487                 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 488                                                  bi->page_offset,
 489                                                  ICE_RXBUF_2048,
 490                                                  DMA_FROM_DEVICE);
 491 
 492                 /* Refresh the desc even if buffer_addrs didn't change
 493                  * because each write-back erases this info.
 494                  */
 495                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 496 
 497                 rx_desc++;
 498                 bi++;
 499                 ntu++;
 500                 if (unlikely(ntu == rx_ring->count)) {
 501                         rx_desc = ICE_RX_DESC(rx_ring, 0);
 502                         bi = rx_ring->rx_buf;
 503                         ntu = 0;
 504                 }
 505 
 506                 /* clear the status bits for the next_to_use descriptor */
 507                 rx_desc->wb.status_error0 = 0;
 508 
 509                 cleaned_count--;
 510         } while (cleaned_count);
 511 
 512         if (rx_ring->next_to_use != ntu)
 513                 ice_release_rx_desc(rx_ring, ntu);
 514 
 515         return !!cleaned_count;
 516 }
 517 
 518 /**
 519  * ice_page_is_reserved - check if reuse is possible
 520  * @page: page struct to check
 521  */
 522 static bool ice_page_is_reserved(struct page *page)
 523 {
 524         return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 525 }
 526 
 527 /**
 528  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 529  * @rx_buf: Rx buffer to adjust
 530  * @size: Size of adjustment
 531  *
 532  * Update the offset within page so that Rx buf will be ready to be reused.
 533  * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 534  * so the second half of page assigned to Rx buffer will be used, otherwise
 535  * the offset is moved by the @size bytes
 536  */
 537 static void
 538 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 539 {
 540 #if (PAGE_SIZE < 8192)
 541         /* flip page offset to other buffer */
 542         rx_buf->page_offset ^= size;
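              /* e.g. with 2K buffers in a 4K page: 0 ^ 2048 = 2048 and
               * 2048 ^ 2048 = 0, alternating between the two page halves
               */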
 543 #else
 544         /* move offset up to the next cache line */
 545         rx_buf->page_offset += size;
 546 #endif
 547 }
 548 
 549 /**
 550  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 551  * @rx_buf: buffer containing the page
 552  *
 553  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 554  * which will assign the current buffer to the buffer that next_to_alloc is
 555  * pointing to; otherwise, the DMA mapping needs to be destroyed and
 556  * page freed
 557  */
 558 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 559 {
 560 #if (PAGE_SIZE >= 8192)
 561         unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
 562 #endif
 563         unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 564         struct page *page = rx_buf->page;
 565 
 566         /* avoid re-using remote pages */
 567         if (unlikely(ice_page_is_reserved(page)))
 568                 return false;
 569 
 570 #if (PAGE_SIZE < 8192)
 571         /* if we are only owner of page we can reuse it */
 572         if (unlikely((page_count(page) - pagecnt_bias) > 1))
 573                 return false;
 574 #else
 575         if (rx_buf->page_offset > last_offset)
 576                 return false;
 577 #endif /* PAGE_SIZE < 8192) */
 578 
 579         /* If we have drained the page fragment pool we need to update
 580          * the pagecnt_bias and page count so that we fully restock the
 581          * number of references the driver holds.
 582          */
 583         if (unlikely(pagecnt_bias == 1)) {
 584                 page_ref_add(page, USHRT_MAX - 1);
 585                 rx_buf->pagecnt_bias = USHRT_MAX;
 586         }
 587 
 588         return true;
 589 }
 590 
 591 /**
 592  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 593  * @rx_buf: buffer containing page to add
 594  * @skb: sk_buff to place the data into
 595  * @size: packet length from rx_desc
 596  *
 597  * This function will add the data contained in rx_buf->page to the skb.
 598  * It will just attach the page as a frag to the skb.
 599  * The function will then update the page offset.
 600  */
 601 static void
 602 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 603                 unsigned int size)
 604 {
 605 #if (PAGE_SIZE >= 8192)
 606         unsigned int truesize = SKB_DATA_ALIGN(size);
 607 #else
 608         unsigned int truesize = ICE_RXBUF_2048;
 609 #endif
 610 
 611         if (!size)
 612                 return;
 613         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 614                         rx_buf->page_offset, size, truesize);
 615 
 616         /* page is being used so we must update the page offset */
 617         ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 618 }
 619 
 620 /**
 621  * ice_reuse_rx_page - page flip buffer and store it back on the ring
 622  * @rx_ring: Rx descriptor ring to store buffers on
 623  * @old_buf: donor buffer to have page reused
 624  *
 625  * Synchronizes page for reuse by the adapter
 626  */
 627 static void
 628 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
 629 {
 630         u16 nta = rx_ring->next_to_alloc;
 631         struct ice_rx_buf *new_buf;
 632 
 633         new_buf = &rx_ring->rx_buf[nta];
 634 
 635         /* update, and store next to alloc */
 636         nta++;
 637         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 638 
 639         /* Transfer page from old buffer to new buffer.
 640          * Move each member individually to avoid possible store
 641          * forwarding stalls and unnecessary copy of skb.
 642          */
 643         new_buf->dma = old_buf->dma;
 644         new_buf->page = old_buf->page;
 645         new_buf->page_offset = old_buf->page_offset;
 646         new_buf->pagecnt_bias = old_buf->pagecnt_bias;
 647 }
 648 
 649 /**
 650  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 651  * @rx_ring: Rx descriptor ring to transact packets on
 652  * @skb: skb to be used
 653  * @size: size of buffer to add to skb
 654  *
 655  * This function will pull an Rx buffer from the ring and synchronize it
 656  * for use by the CPU.
 657  */
 658 static struct ice_rx_buf *
 659 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
 660                const unsigned int size)
 661 {
 662         struct ice_rx_buf *rx_buf;
 663 
 664         rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
 665         prefetchw(rx_buf->page);
 666         *skb = rx_buf->skb;
 667 
 668         if (!size)
 669                 return rx_buf;
 670         /* we are reusing so sync this buffer for CPU use */
 671         dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
 672                                       rx_buf->page_offset, size,
 673                                       DMA_FROM_DEVICE);
 674 
 675         /* We have pulled a buffer for use, so decrement pagecnt_bias */
 676         rx_buf->pagecnt_bias--;
 677 
 678         return rx_buf;
 679 }
 680 
 681 /**
 682  * ice_construct_skb - Allocate skb and populate it
 683  * @rx_ring: Rx descriptor ring to transact packets on
 684  * @rx_buf: Rx buffer to pull data from
 685  * @size: the length of the packet
 686  *
 687  * This function allocates an skb. It then populates it with the page
 688  * data from the current receive descriptor, taking care to set up the
 689  * skb correctly.
 690  */
 691 static struct sk_buff *
 692 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 693                   unsigned int size)
 694 {
 695         void *va = page_address(rx_buf->page) + rx_buf->page_offset;
 696         unsigned int headlen;
 697         struct sk_buff *skb;
 698 
 699         /* prefetch first cache line of first page */
 700         prefetch(va);
 701 #if L1_CACHE_BYTES < 128
 702         prefetch((u8 *)va + L1_CACHE_BYTES);
 703 #endif /* L1_CACHE_BYTES */
 704 
 705         /* allocate a skb to store the frags */
 706         skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
 707                                GFP_ATOMIC | __GFP_NOWARN);
 708         if (unlikely(!skb))
 709                 return NULL;
 710 
 711         skb_record_rx_queue(skb, rx_ring->q_index);
 712         /* Determine available headroom for copy */
 713         headlen = size;
 714         if (headlen > ICE_RX_HDR_SIZE)
 715                 headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
 716 
 717         /* align pull length to size of long to optimize memcpy performance */
 718         memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
 719 
 720         /* if we exhaust the linear part then add what is left as a frag */
 721         size -= headlen;
 722         if (size) {
 723 #if (PAGE_SIZE >= 8192)
 724                 unsigned int truesize = SKB_DATA_ALIGN(size);
 725 #else
 726                 unsigned int truesize = ICE_RXBUF_2048;
 727 #endif
 728                 skb_add_rx_frag(skb, 0, rx_buf->page,
 729                                 rx_buf->page_offset + headlen, size, truesize);
 730                 /* buffer is used by skb, update page_offset */
 731                 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 732         } else {
 733                 /* buffer is unused, reset bias back to rx_buf; data was copied
 734                  * onto skb's linear part so there's no need for adjusting
 735                  * page offset and we can reuse this buffer as-is
 736                  */
 737                 rx_buf->pagecnt_bias++;
 738         }
 739 
 740         return skb;
 741 }
 742 
 743 /**
 744  * ice_put_rx_buf - Clean up used buffer and either recycle or free
 745  * @rx_ring: Rx descriptor ring to transact packets on
 746  * @rx_buf: Rx buffer to pull data from
 747  *
 748  * This function will  clean up the contents of the rx_buf. It will
 749  * either recycle the buffer or unmap it and free the associated resources.
 750  */
 751 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 752 {
 753         if (!rx_buf)
 754                 return;
 755 
 756         if (ice_can_reuse_rx_page(rx_buf)) {
 757                 /* hand second half of page back to the ring */
 758                 ice_reuse_rx_page(rx_ring, rx_buf);
 759                 rx_ring->rx_stats.page_reuse_count++;
 760         } else {
 761                 /* we are not reusing the buffer so unmap it */
 762                 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
 763                                      DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 764                 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 765         }
 766 
 767         /* clear contents of buffer_info */
 768         rx_buf->page = NULL;
 769         rx_buf->skb = NULL;
 770 }
 771 
 772 /**
 773  * ice_cleanup_headers - Correct empty headers
 774  * @skb: pointer to current skb being fixed
 775  *
 776  * Also address the case where we are pulling data in on pages only
 777  * and as such no data is present in the skb header.
 778  *
 779  * In addition if skb is not at least 60 bytes we need to pad it so that
 780  * it is large enough to qualify as a valid Ethernet frame.
 781  *
 782  * Returns true if an error was encountered and skb was freed.
 783  */
 784 static bool ice_cleanup_headers(struct sk_buff *skb)
 785 {
 786         /* if eth_skb_pad returns an error the skb was freed */
 787         if (eth_skb_pad(skb))
 788                 return true;
 789 
 790         return false;
 791 }
 792 
 793 /**
 794  * ice_test_staterr - tests bits in Rx descriptor status and error fields
 795  * @rx_desc: pointer to receive descriptor (little-endian format)
 796  * @stat_err_bits: value to mask
 797  *
 798  * This function does some fast chicanery in order to return the
 799  * value of the mask which is really only used for boolean tests.
 800  * The status_error0 field doesn't need to be shifted because it begins
 801  * at offset zero.
 802  */
 803 static bool
 804 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
 805 {
 806         return !!(rx_desc->wb.status_error0 &
 807                   cpu_to_le16(stat_err_bits));
 808 }
 809 
 810 /**
 811  * ice_is_non_eop - process handling of non-EOP buffers
 812  * @rx_ring: Rx ring being processed
 813  * @rx_desc: Rx descriptor for current buffer
 814  * @skb: Current socket buffer containing buffer in progress
 815  *
 816  * This function updates next to clean. If the buffer is an EOP buffer
 817  * this function exits returning false, otherwise it will place the
 818  * sk_buff in the next buffer to be chained and return true indicating
 819  * that this is in fact a non-EOP buffer.
 820  */
 821 static bool
 822 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
 823                struct sk_buff *skb)
 824 {
 825         u32 ntc = rx_ring->next_to_clean + 1;
 826 
 827         /* fetch, update, and store next to clean */
 828         ntc = (ntc < rx_ring->count) ? ntc : 0;
 829         rx_ring->next_to_clean = ntc;
 830 
 831         prefetch(ICE_RX_DESC(rx_ring, ntc));
 832 
 833         /* if we are the last buffer then there is nothing else to do */
 834 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
 835         if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
 836                 return false;
 837 
 838         /* place skb in next buffer to be received */
 839         rx_ring->rx_buf[ntc].skb = skb;
 840         rx_ring->rx_stats.non_eop_descs++;
 841 
 842         return true;
 843 }
 844 
 845 /**
 846  * ice_ptype_to_htype - get a hash type
 847  * @ptype: the ptype value from the descriptor
 848  *
 849  * Returns a hash type to be used by skb_set_hash
 850  */
 851 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
 852 {
 853         return PKT_HASH_TYPE_NONE;
 854 }
 855 
 856 /**
 857  * ice_rx_hash - set the hash value in the skb
 858  * @rx_ring: descriptor ring
 859  * @rx_desc: specific descriptor
 860  * @skb: pointer to current skb
 861  * @rx_ptype: the ptype value from the descriptor
 862  */
 863 static void
 864 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
 865             struct sk_buff *skb, u8 rx_ptype)
 866 {
 867         struct ice_32b_rx_flex_desc_nic *nic_mdid;
 868         u32 hash;
 869 
 870         if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
 871                 return;
 872 
 873         if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
 874                 return;
 875 
 876         nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
 877         hash = le32_to_cpu(nic_mdid->rss_hash);
 878         skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
 879 }
 880 
 881 /**
 882  * ice_rx_csum - Indicate in skb if checksum is good
 883  * @ring: the ring we care about
 884  * @skb: skb currently being received and modified
 885  * @rx_desc: the receive descriptor
 886  * @ptype: the packet type decoded by hardware
 887  *
 888  * skb->protocol must be set before this function is called
 889  */
 890 static void
 891 ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 892             union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
 893 {
 894         struct ice_rx_ptype_decoded decoded;
 895         u32 rx_error, rx_status;
 896         bool ipv4, ipv6;
 897 
 898         rx_status = le16_to_cpu(rx_desc->wb.status_error0);
 899         rx_error = rx_status;
 900 
 901         decoded = ice_decode_rx_desc_ptype(ptype);
 902 
 903         /* Start with CHECKSUM_NONE and by default csum_level = 0 */
 904         skb->ip_summed = CHECKSUM_NONE;
 905         skb_checksum_none_assert(skb);
 906 
 907         /* check if Rx checksum is enabled */
 908         if (!(ring->netdev->features & NETIF_F_RXCSUM))
 909                 return;
 910 
 911         /* check if HW has decoded the packet and checksum */
 912         if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
 913                 return;
 914 
 915         if (!(decoded.known && decoded.outer_ip))
 916                 return;
 917 
 918         ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 919                (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
 920         ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 921                (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
 922 
 923         if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
 924                                  BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
 925                 goto checksum_fail;
 926         else if (ipv6 && (rx_status &
 927                  (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
 928                 goto checksum_fail;
 929 
 930         /* check for L4 errors and handle packets that were not able to be
 931          * checksummed due to arrival speed
 932          */
 933         if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 934                 goto checksum_fail;
 935 
 936         /* Only report checksum unnecessary for TCP, UDP, or SCTP */
 937         switch (decoded.inner_prot) {
 938         case ICE_RX_PTYPE_INNER_PROT_TCP:
 939         case ICE_RX_PTYPE_INNER_PROT_UDP:
 940         case ICE_RX_PTYPE_INNER_PROT_SCTP:
 941                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 942         default:
 943                 break;
 944         }
 945         return;
 946 
 947 checksum_fail:
 948         ring->vsi->back->hw_csum_rx_error++;
 949 }
 950 
 951 /**
 952  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 953  * @rx_ring: Rx descriptor ring packet is being transacted on
 954  * @rx_desc: pointer to the EOP Rx descriptor
 955  * @skb: pointer to current skb being populated
 956  * @ptype: the packet type decoded by hardware
 957  *
 958  * This function checks the ring, descriptor, and packet information in
 959  * order to populate the hash, checksum, VLAN, protocol, and
 960  * other fields within the skb.
 961  */
 962 static void
 963 ice_process_skb_fields(struct ice_ring *rx_ring,
 964                        union ice_32b_rx_flex_desc *rx_desc,
 965                        struct sk_buff *skb, u8 ptype)
 966 {
 967         ice_rx_hash(rx_ring, rx_desc, skb, ptype);
 968 
 969         /* modifies the skb - consumes the enet header */
 970         skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 971 
 972         ice_rx_csum(rx_ring, skb, rx_desc, ptype);
 973 }
 974 
 975 /**
 976  * ice_receive_skb - Send a completed packet up the stack
 977  * @rx_ring: Rx ring in play
 978  * @skb: packet to send up
 979  * @vlan_tag: VLAN tag for packet
 980  *
 981  * This function sends the completed packet (via skb) up the stack using
 982  * gro receive functions (with/without VLAN tag)
 983  */
 984 static void
 985 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
 986 {
 987         if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 988             (vlan_tag & VLAN_VID_MASK))
 989                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 990         napi_gro_receive(&rx_ring->q_vector->napi, skb);
 991 }
 992 
 993 /**
 994  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 995  * @rx_ring: Rx descriptor ring to transact packets on
 996  * @budget: Total limit on number of packets to process
 997  *
 998  * This function provides a "bounce buffer" approach to Rx interrupt
 999  * processing. The advantage to this is that on systems that have
1000  * expensive overhead for IOMMU access this provides a means of avoiding
1001  * it by maintaining the mapping of the page to the system.
1002  *
1003  * Returns amount of work completed
1004  */
1005 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1006 {
1007         unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1008         u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1009         bool failure;
1010 
1011         /* start the loop to process Rx packets bounded by 'budget' */
1012         while (likely(total_rx_pkts < (unsigned int)budget)) {
1013                 union ice_32b_rx_flex_desc *rx_desc;
1014                 struct ice_rx_buf *rx_buf;
1015                 struct sk_buff *skb;
1016                 unsigned int size;
1017                 u16 stat_err_bits;
1018                 u16 vlan_tag = 0;
1019                 u8 rx_ptype;
1020 
1021                 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1022                 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1023 
1024                 /* status_error_len will always be zero for unused descriptors
1025                  * because it's cleared in cleanup, and overlaps with hdr_addr
1026                  * which is always zero because packet split isn't used, if the
1027                  * hardware wrote DD then it will be non-zero
1028                  */
1029                 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1030                 if (!ice_test_staterr(rx_desc, stat_err_bits))
1031                         break;
1032 
1033                 /* This memory barrier is needed to keep us from reading
1034                  * any other fields out of the rx_desc until we know the
1035                  * DD bit is set.
1036                  */
1037                 dma_rmb();
1038 
1039                 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1040                         ICE_RX_FLX_DESC_PKT_LEN_M;
1041 
1042                 /* retrieve a buffer from the ring */
1043                 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1044 
1045                 if (skb)
1046                         ice_add_rx_frag(rx_buf, skb, size);
1047                 else
1048                         skb = ice_construct_skb(rx_ring, rx_buf, size);
1049 
1050                 /* exit if we failed to retrieve a buffer */
1051                 if (!skb) {
1052                         rx_ring->rx_stats.alloc_buf_failed++;
1053                         if (rx_buf)
1054                                 rx_buf->pagecnt_bias++;
1055                         break;
1056                 }
1057 
1058                 ice_put_rx_buf(rx_ring, rx_buf);
1059                 cleaned_count++;
1060 
1061                 /* skip if it is a non-EOP desc */
1062                 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1063                         continue;
1064 
1065                 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1066                 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1067                         dev_kfree_skb_any(skb);
1068                         continue;
1069                 }
1070 
1071                 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1072                 if (ice_test_staterr(rx_desc, stat_err_bits))
1073                         vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1074 
1075                 /* correct empty headers and pad skb if needed (to make a valid
1076                  * Ethernet frame)
1077                  */
1078                 if (ice_cleanup_headers(skb)) {
1079                         skb = NULL;
1080                         continue;
1081                 }
1082 
1083                 /* probably a little skewed due to removing CRC */
1084                 total_rx_bytes += skb->len;
1085 
1086                 /* populate checksum, VLAN, and protocol */
1087                 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1088                         ICE_RX_FLEX_DESC_PTYPE_M;
1089 
1090                 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1091 
1092                 /* send completed skb up the stack */
1093                 ice_receive_skb(rx_ring, skb, vlan_tag);
1094 
1095                 /* update budget accounting */
1096                 total_rx_pkts++;
1097         }
1098 
1099         /* return up to cleaned_count buffers to hardware */
1100         failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1101 
1102         /* update queue and vector specific stats */
1103         u64_stats_update_begin(&rx_ring->syncp);
1104         rx_ring->stats.pkts += total_rx_pkts;
1105         rx_ring->stats.bytes += total_rx_bytes;
1106         u64_stats_update_end(&rx_ring->syncp);
1107         rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1108         rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1109 
1110         /* guarantee a trip back through this routine if there was a failure */
1111         return failure ? budget : (int)total_rx_pkts;
1112 }
1113 
1114 /**
1115  * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1116  * @port_info: port_info structure containing the current link speed
1117  * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1118  * @itr: ITR value to update
1119  *
1120  * Calculate how big of an increment should be applied to the ITR value passed
1121  * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
1122  * link speed.
1123  *
1124  * The following is a calculation derived from:
1125  *  wmem_default / (size + overhead) = desired_pkts_per_int
1126  *  rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
1127  *  (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value
1128  *
1129  * Assuming wmem_default is 212992 and overhead is 640 bytes per
1130  * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1131  * formula down to:
1132  *
1133  *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
1134  * ITR = -------------------------------------------- * --------------
1135  *                           rate                       pkt_size + 640
1136  */
1137 static unsigned int
1138 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1139                                  unsigned int avg_pkt_size,
1140                                  unsigned int itr)
1141 {
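              /* Each multiplier below is the constant part of the formula
               * above, precomputed per link speed as
               * wmem_default * bits_per_byte * usecs_per_sec / rate, e.g.
               * for 10 Gbps: 212992 * 8 * 1000000 / 10000000000 ~= 170.
               */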
1142         switch (port_info->phy.link_info.link_speed) {
1143         case ICE_AQ_LINK_SPEED_100GB:
1144                 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1145                                     avg_pkt_size + 640);
1146                 break;
1147         case ICE_AQ_LINK_SPEED_50GB:
1148                 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1149                                     avg_pkt_size + 640);
1150                 break;
1151         case ICE_AQ_LINK_SPEED_40GB:
1152                 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1153                                     avg_pkt_size + 640);
1154                 break;
1155         case ICE_AQ_LINK_SPEED_25GB:
1156                 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1157                                     avg_pkt_size + 640);
1158                 break;
1159         case ICE_AQ_LINK_SPEED_20GB:
1160                 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1161                                     avg_pkt_size + 640);
1162                 break;
1163         case ICE_AQ_LINK_SPEED_10GB:
1164                 /* fall through */
1165         default:
1166                 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1167                                     avg_pkt_size + 640);
1168                 break;
1169         }
1170 
1171         if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1172                 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1173                 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1174         }
1175 
1176         return itr;
1177 }
1178 
1179 /**
1180  * ice_update_itr - update the adaptive ITR value based on statistics
1181  * @q_vector: structure containing interrupt and ring information
1182  * @rc: structure containing ring performance data
1183  *
1184  * Stores a new ITR value based on packets and byte
1185  * counts during the last interrupt.  The advantage of per interrupt
1186  * computation is faster updates and more accurate ITR for the current
1187  * traffic pattern.  Constants in this function were computed
1188  * based on theoretical maximum wire speed and thresholds were set based
1189  * on testing data as well as attempting to minimize response time
1190  * while increasing bulk throughput.
1191  */
1192 static void
1193 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1194 {
1195         unsigned long next_update = jiffies;
1196         unsigned int packets, bytes, itr;
1197         bool container_is_rx;
1198 
1199         if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1200                 return;
1201 
1202         /* If itr_countdown is set it means we programmed an ITR within
1203          * the last 4 interrupt cycles. This has a side effect of us
1204          * potentially firing an early interrupt. In order to work around
1205          * this we need to throw out any data received for a few
1206          * interrupts following the update.
1207          */
1208         if (q_vector->itr_countdown) {
1209                 itr = rc->target_itr;
1210                 goto clear_counts;
1211         }
1212 
1213         container_is_rx = (&q_vector->rx == rc);
1214         /* For Rx we want to push the delay up and default to low latency.
1215          * For Tx we want to pull the delay down and default to high latency.
1216          */
1217         itr = container_is_rx ?
1218                 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1219                 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1220 
1221         /* If we didn't update within up to 1 - 2 jiffies we can assume
1222          * that either packets are coming in so slow there hasn't been
1223          * any work, or that there is so much work that NAPI is dealing
1224          * with interrupt moderation and we don't need to do anything.
1225          */
1226         if (time_after(next_update, rc->next_update))
1227                 goto clear_counts;
1228 
1229         prefetch(q_vector->vsi->port_info);
1230 
1231         packets = rc->total_pkts;
1232         bytes = rc->total_bytes;
1233 
1234         if (container_is_rx) {
1235                 /* For Rx, if there are 1 to 4 packets and bytes are less than
1236                  * 9000 assume insufficient data to use bulk rate limiting
1237                  * approach unless Tx is already in bulk rate limiting. We
1238                  * are likely latency driven.
1239                  */
1240                 if (packets && packets < 4 && bytes < 9000 &&
1241                     (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1242                         itr = ICE_ITR_ADAPTIVE_LATENCY;
1243                         goto adjust_by_size_and_speed;
1244                 }
1245         } else if (packets < 4) {
1246                 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1247                  * bulk mode and we are receiving 4 or fewer packets just
1248                  * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1249                  * that the Rx can relax.
1250                  */
1251                 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1252                     (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1253                     ICE_ITR_ADAPTIVE_MAX_USECS)
1254                         goto clear_counts;
1255         } else if (packets > 32) {
1256                 /* If we have processed over 32 packets in a single interrupt
1257                  * for Tx assume we need to switch over to "bulk" mode.
1258                  */
1259                 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1260         }
1261 
1262         /* We have no packets to actually measure against. This means
1263          * either one of the other queues on this vector is active or
1264          * we are a Tx queue doing TSO with too high of an interrupt rate.
1265          *
1266          * Between 4 and 56 we can assume that our current interrupt delay
1267          * is only slightly too low. As such we should increase it by a small
1268          * fixed amount.
1269          */
1270         if (packets < 56) {
1271                 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1272                 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1273                         itr &= ICE_ITR_ADAPTIVE_LATENCY;
1274                         itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1275                 }
1276                 goto clear_counts;
1277         }
1278 
1279         if (packets <= 256) {
1280                 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1281                 itr &= ICE_ITR_MASK;
1282 
1283                 /* Between 56 and 112 is our "goldilocks" zone where we are
1284                  * working out "just right". Just report that our current
1285                  * ITR is good for us.
1286                  */
1287                 if (packets <= 112)
1288                         goto clear_counts;
1289 
1290                 /* If packet count is 128 or greater we are likely looking
1291                  * at a slight overrun of the delay we want. Try halving
1292                  * our delay to see if that will cut the number of packets
1293                  * in half per interrupt.
1294                  */
1295                 itr >>= 1;
1296                 itr &= ICE_ITR_MASK;
1297                 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1298                         itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1299 
1300                 goto clear_counts;
1301         }
1302 
1303         /* The paths below assume we are dealing with a bulk ITR since
1304          * number of packets is greater than 256. We are just going to have
1305          * to compute a value and try to bring the count under control,
1306          * though for smaller packet sizes there isn't much we can do as
1307          * NAPI polling will likely be kicking in sooner rather than later.
1308          */
1309         itr = ICE_ITR_ADAPTIVE_BULK;
1310 
1311 adjust_by_size_and_speed:
1312 
1313         /* based on checks above packets cannot be 0 so division is safe */
1314         itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1315                                                bytes / packets, itr);
1316 
1317 clear_counts:
1318         /* write back value */
1319         rc->target_itr = itr;
1320 
1321         /* next update should occur within next jiffy */
1322         rc->next_update = next_update + 1;
1323 
1324         rc->total_bytes = 0;
1325         rc->total_pkts = 0;
1326 }
1327 
1328 /**
1329  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1330  * @itr_idx: interrupt throttling index
1331  * @itr: interrupt throttling value in usecs
1332  */
1333 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1334 {
1335         /* The ITR value is reported in microseconds, and the register value is
1336          * recorded in 2 microsecond units. For this reason we only need to
1337          * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1338          * granularity as a shift instead of division. The mask makes sure the
1339          * ITR value is never odd so we don't accidentally write into the field
1340          * prior to the ITR field.
1341          */
1342         itr &= ICE_ITR_MASK;
1343 
1344         return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1345                 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1346                 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1347 }
1348 
1349 /* The act of updating the ITR will cause it to immediately trigger. In order
1350  * to prevent this from throwing off adaptive update statistics we defer the
1351  * update so that it can only happen so often. So after either Tx or Rx are
1352  * updated we make the adaptive scheme wait until either the ITR completely
1353  * expires via the next_update expiration or we have been through at least
1354  * 3 interrupts.
1355  */
1356 #define ITR_COUNTDOWN_START 3
1357 
1358 /**
1359  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1360  * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1361  */
1362 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1363 {
1364         struct ice_ring_container *tx = &q_vector->tx;
1365         struct ice_ring_container *rx = &q_vector->rx;
1366         struct ice_vsi *vsi = q_vector->vsi;
1367         u32 itr_val;
1368 
1369         /* when exiting WB_ON_ITR, set a low ITR value and trigger
1370          * interrupts to expire right away in case we have more work ready to go
1371          * already
1372          */
1373         if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1374                 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1375                 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1376                 /* set target back to last user set value */
1377                 rx->target_itr = rx->itr_setting;
1378                 /* set current to what we just wrote and dynamic if needed */
1379                 rx->current_itr = ICE_WB_ON_ITR_USECS |
1380                         (rx->itr_setting & ICE_ITR_DYNAMIC);
1381                 /* allow normal interrupt flow to start */
1382                 q_vector->itr_countdown = 0;
1383                 return;
1384         }
1385 
1386         /* This will do nothing if dynamic updates are not enabled */
1387         ice_update_itr(q_vector, tx);
1388         ice_update_itr(q_vector, rx);
1389 
1390         /* This block of logic allows us to get away with only updating
1391          * one ITR value with each interrupt. The idea is to perform a
1392          * pseudo-lazy update with the following criteria.
1393          *
1394          * 1. Rx is given higher priority than Tx if both are in the same state
1395          * 2. If we must reduce an ITR, that reduction is given highest priority.
1396          * 3. We then give priority to the ITR that needs the larger increase.
1397          */
1398         if (rx->target_itr < rx->current_itr) {
1399                 /* Rx ITR needs to be reduced, this is highest priority */
1400                 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1401                 rx->current_itr = rx->target_itr;
1402                 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1403         } else if ((tx->target_itr < tx->current_itr) ||
1404                    ((rx->target_itr - rx->current_itr) <
1405                     (tx->target_itr - tx->current_itr))) {
1406                 /* Tx ITR needs to be reduced, this is second priority
1407                  * Tx ITR needs to be increased more than Rx, fourth priority
1408                  */
1409                 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1410                 tx->current_itr = tx->target_itr;
1411                 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1412         } else if (rx->current_itr != rx->target_itr) {
1413                 /* Rx ITR needs to be increased, third priority */
1414                 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1415                 rx->current_itr = rx->target_itr;
1416                 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1417         } else {
1418                 /* Still have to re-enable the interrupts */
1419                 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1420                 if (q_vector->itr_countdown)
1421                         q_vector->itr_countdown--;
1422         }
1423 
1424         if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1425                 wr32(&q_vector->vsi->back->hw,
1426                      GLINT_DYN_CTL(q_vector->reg_idx),
1427                      itr_val);
1428 }
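
/* Illustrative summary of the write priority implemented above, derived only
 * from the branch order and comments in ice_update_ena_itr():
 *
 *     1. Rx ITR needs to shrink                     -> write the Rx target
 *     2. Tx ITR needs to shrink, or needs to grow
 *        by more than the Rx ITR does               -> write the Tx target
 *     3. Rx ITR needs to grow                       -> write the Rx target
 *     otherwise                                     -> only re-enable the IRQ
 *
 * At most one ITR register is written per interrupt; the other is brought in
 * line on a later pass once its deferral window has expired.
 */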
1429 
1430 /**
1431  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1432  * @q_vector: q_vector to set WB_ON_ITR on
1433  *
1434  * We need to tell hardware to write-back completed descriptors even when
1435  * interrupts are disabled. Without WB_ON_ITR, descriptors are only written
1436  * back when a full cache line of them has accumulated, so descriptors that
1437  * don't fill a complete cache line may not be written back until the next
1438  * interrupt.
1439  *
1440  * This sets the write-back frequency to 2 microseconds as that is the minimum
1441  * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1442  * make sure hardware knows we aren't meddling with the INTENA_M bit.
1443  */
1444 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1445 {
1446         struct ice_vsi *vsi = q_vector->vsi;
1447 
1448         /* already in WB_ON_ITR mode; no need to change it */
1449         if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1450                 return;
1451 
1452         if (q_vector->num_ring_rx)
1453                 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1454                      ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1455                                                  ICE_RX_ITR));
1456 
1457         if (q_vector->num_ring_tx)
1458                 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1459                      ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1460                                                  ICE_TX_ITR));
1461 
1462         q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1463 }
1464 
1465 /**
1466  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1467  * @napi: napi struct with our device's info in it
1468  * @budget: amount of work driver is allowed to do this pass, in packets
1469  *
1470  * This function will clean all queues associated with a q_vector.
1471  *
1472  * Returns the amount of work done
1473  */
1474 int ice_napi_poll(struct napi_struct *napi, int budget)
1475 {
1476         struct ice_q_vector *q_vector =
1477                                 container_of(napi, struct ice_q_vector, napi);
1478         bool clean_complete = true;
1479         struct ice_ring *ring;
1480         int budget_per_ring;
1481         int work_done = 0;
1482 
1483         /* Since the actual Tx work is minimal, we can give the Tx a larger
1484          * budget and be more aggressive about cleaning up the Tx descriptors.
1485          */
1486         ice_for_each_ring(ring, q_vector->tx)
1487                 if (!ice_clean_tx_irq(ring, budget))
1488                         clean_complete = false;
1489 
1490         /* Handle case where we are called by netpoll with a budget of 0 */
1491         if (unlikely(budget <= 0))
1492                 return budget;
1493 
1494         /* normally we have 1 Rx ring per q_vector */
1495         if (unlikely(q_vector->num_ring_rx > 1))
1496                 /* We attempt to distribute budget to each Rx queue fairly, but
1497                  * don't allow the budget to go below 1 because that would exit
1498                  * polling early.
1499                  */
1500                 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1501         else
1502                 /* Max of 1 Rx ring in this q_vector so give it the budget */
1503                 budget_per_ring = budget;
1504 
1505         ice_for_each_ring(ring, q_vector->rx) {
1506                 int cleaned;
1507 
1508                 cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1509                 work_done += cleaned;
1510                 /* if we clean as many as budgeted, we must not be done */
1511                 if (cleaned >= budget_per_ring)
1512                         clean_complete = false;
1513         }
1514 
1515         /* If work is not complete, return the full budget so polling continues */
1516         if (!clean_complete)
1517                 return budget;
1518 
1519         /* Exit the polling mode, but don't re-enable interrupts if stack might
1520          * poll us due to busy-polling
1521          */
1522         if (likely(napi_complete_done(napi, work_done)))
1523                 ice_update_ena_itr(q_vector);
1524         else
1525                 ice_set_wb_on_itr(q_vector);
1526 
1527         return min_t(int, work_done, budget - 1);
1528 }
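
/* Worked example of the Rx budget split above (illustrative numbers only):
 * with a NAPI budget of 64 and a q_vector that happens to own three Rx rings,
 * budget_per_ring = max(64 / 3, 1) = 21, so each ring may clean at most 21
 * packets this poll; cleaning exactly 21 on any ring forces clean_complete to
 * false and the full budget is returned so NAPI polls again. With the usual
 * single Rx ring the whole budget goes to that one ring.
 */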
1529 
1530 /* helper function for building cmd/type/offset */
1531 static __le64
1532 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1533 {
1534         return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1535                            (td_cmd    << ICE_TXD_QW1_CMD_S) |
1536                            (td_offset << ICE_TXD_QW1_OFFSET_S) |
1537                            ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1538                            (td_tag    << ICE_TXD_QW1_L2TAG1_S));
1539 }
1540 
1541 /**
1542  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1543  * @tx_ring: the ring to be checked
1544  * @size: the number of Tx descriptors we want to ensure are available
1545  *
1546  * Returns -EBUSY if a stop is needed, else 0
1547  */
1548 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1549 {
1550         netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1551         /* Memory barrier before checking head and tail */
1552         smp_mb();
1553 
1554         /* Check again in case another CPU has just made room available. */
1555         if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1556                 return -EBUSY;
1557 
1558         /* A reprieve! - use start_subqueue because it doesn't call schedule */
1559         netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1560         ++tx_ring->tx_stats.restart_q;
1561         return 0;
1562 }
1563 
1564 /**
1565  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1566  * @tx_ring: the ring to be checked
1567  * @size:    the number of Tx descriptors we want to ensure are available
1568  *
1569  * Returns 0 if stop is not needed
1570  */
1571 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1572 {
1573         if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1574                 return 0;
1575 
1576         return __ice_maybe_stop_tx(tx_ring, size);
1577 }
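
/* Usage sketch for the two-level check above; this mirrors how
 * ice_xmit_frame_ring() below calls it (needed_descs is a placeholder name):
 *
 *     if (ice_maybe_stop_tx(tx_ring, needed_descs)) {
 *             tx_ring->tx_stats.tx_busy++;
 *             return NETDEV_TX_BUSY;
 *     }
 *
 * Past this point at least needed_descs descriptors are free. The fast path
 * costs a single comparison; only when the ring looks full does
 * __ice_maybe_stop_tx() stop the subqueue, issue the barrier and re-check, so
 * a completion racing on another CPU can immediately restart the queue.
 */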
1578 
1579 /**
1580  * ice_tx_map - Build the Tx descriptor
1581  * @tx_ring: ring to send buffer on
1582  * @first: Tx buffer info for the first buffer of the packet
1583  * @off: pointer to struct that holds offload parameters
1584  *
1585  * This function loops over the skb data pointed to by *first
1586  * and gets a physical address for each memory location and programs
1587  * it and the length into the transmit descriptor.
1588  */
1589 static void
1590 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1591            struct ice_tx_offload_params *off)
1592 {
1593         u64 td_offset, td_tag, td_cmd;
1594         u16 i = tx_ring->next_to_use;
1595         skb_frag_t *frag;
1596         unsigned int data_len, size;
1597         struct ice_tx_desc *tx_desc;
1598         struct ice_tx_buf *tx_buf;
1599         struct sk_buff *skb;
1600         dma_addr_t dma;
1601 
1602         td_tag = off->td_l2tag1;
1603         td_cmd = off->td_cmd;
1604         td_offset = off->td_offset;
1605         skb = first->skb;
1606 
1607         data_len = skb->data_len;
1608         size = skb_headlen(skb);
1609 
1610         tx_desc = ICE_TX_DESC(tx_ring, i);
1611 
1612         if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1613                 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1614                 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1615                           ICE_TX_FLAGS_VLAN_S;
1616         }
1617 
1618         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1619 
1620         tx_buf = first;
1621 
1622         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1623                 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1624 
1625                 if (dma_mapping_error(tx_ring->dev, dma))
1626                         goto dma_error;
1627 
1628                 /* record length, and DMA address */
1629                 dma_unmap_len_set(tx_buf, len, size);
1630                 dma_unmap_addr_set(tx_buf, dma, dma);
1631 
1632                 /* align size to end of page */
1633                 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1634                 tx_desc->buf_addr = cpu_to_le64(dma);
1635 
1636                 /* account for data chunks larger than the hardware
1637                  * can handle
1638                  */
1639                 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1640                         tx_desc->cmd_type_offset_bsz =
1641                                 build_ctob(td_cmd, td_offset, max_data, td_tag);
1642 
1643                         tx_desc++;
1644                         i++;
1645 
1646                         if (i == tx_ring->count) {
1647                                 tx_desc = ICE_TX_DESC(tx_ring, 0);
1648                                 i = 0;
1649                         }
1650 
1651                         dma += max_data;
1652                         size -= max_data;
1653 
1654                         max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1655                         tx_desc->buf_addr = cpu_to_le64(dma);
1656                 }
1657 
1658                 if (likely(!data_len))
1659                         break;
1660 
1661                 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1662                                                           size, td_tag);
1663 
1664                 tx_desc++;
1665                 i++;
1666 
1667                 if (i == tx_ring->count) {
1668                         tx_desc = ICE_TX_DESC(tx_ring, 0);
1669                         i = 0;
1670                 }
1671 
1672                 size = skb_frag_size(frag);
1673                 data_len -= size;
1674 
1675                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1676                                        DMA_TO_DEVICE);
1677 
1678                 tx_buf = &tx_ring->tx_buf[i];
1679         }
1680 
1681         /* record bytecount for BQL */
1682         netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1683 
1684         /* record SW timestamp if HW timestamp is not available */
1685         skb_tx_timestamp(first->skb);
1686 
1687         i++;
1688         if (i == tx_ring->count)
1689                 i = 0;
1690 
1691         /* write last descriptor with RS and EOP bits */
1692         td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1693         tx_desc->cmd_type_offset_bsz =
1694                         build_ctob(td_cmd, td_offset, size, td_tag);
1695 
1696         /* Force memory writes to complete before letting h/w know there
1697          * are new descriptors to fetch.
1698          *
1699          * We also use this memory barrier to make certain all of the
1700          * status bits have been updated before next_to_watch is written.
1701          */
1702         wmb();
1703 
1704         /* set next_to_watch value indicating a packet is present */
1705         first->next_to_watch = tx_desc;
1706 
1707         tx_ring->next_to_use = i;
1708 
1709         ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1710 
1711         /* notify HW of packet */
1712         if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1713                 writel(i, tx_ring->tail);
1714         }
1715 
1716         return;
1717 
1718 dma_error:
1719         /* clear DMA mappings for failed tx_buf map */
1720         for (;;) {
1721                 tx_buf = &tx_ring->tx_buf[i];
1722                 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1723                 if (tx_buf == first)
1724                         break;
1725                 if (i == 0)
1726                         i = tx_ring->count;
1727                 i--;
1728         }
1729 
1730         tx_ring->next_to_use = i;
1731 }
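
/* Worked example for the chunking loop above (illustrative sizes; the 12K and
 * 16K - 1 limits are the ones described in the ice_txd_use_count() comment
 * below): a 30000 byte linear skb head whose DMA address happens to be 4K
 * aligned is carried by one mapping but three data descriptors,
 *
 *     desc 0: 12288 bytes
 *     desc 1: 12288 bytes at dma + 12288
 *     desc 2:  5424 bytes at dma + 24576, written after the loop with EOP|RS
 *
 * The read-request alignment adjustment only changes the split points when
 * the mapping does not start on an ICE_MAX_READ_REQ_SIZE boundary.
 */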
1732 
1733 /**
1734  * ice_tx_csum - Enable Tx checksum offloads
1735  * @first: pointer to the Tx buffer info for the first buffer of this packet
1736  * @off: pointer to struct that holds offload parameters
1737  *
1738  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1739  */
1740 static
1741 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1742 {
1743         u32 l4_len = 0, l3_len = 0, l2_len = 0;
1744         struct sk_buff *skb = first->skb;
1745         union {
1746                 struct iphdr *v4;
1747                 struct ipv6hdr *v6;
1748                 unsigned char *hdr;
1749         } ip;
1750         union {
1751                 struct tcphdr *tcp;
1752                 unsigned char *hdr;
1753         } l4;
1754         __be16 frag_off, protocol;
1755         unsigned char *exthdr;
1756         u32 offset, cmd = 0;
1757         u8 l4_proto = 0;
1758 
1759         if (skb->ip_summed != CHECKSUM_PARTIAL)
1760                 return 0;
1761 
1762         ip.hdr = skb_network_header(skb);
1763         l4.hdr = skb_transport_header(skb);
1764 
1765         /* compute outer L2 header size */
1766         l2_len = ip.hdr - skb->data;
1767         offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1768 
1769         if (skb->encapsulation)
1770                 return -1;
1771 
1772         /* Enable IP checksum offloads */
1773         protocol = vlan_get_protocol(skb);
1774         if (protocol == htons(ETH_P_IP)) {
1775                 l4_proto = ip.v4->protocol;
1776                 /* the stack computes the IP header checksum already; the only
1777                  * time we need the hardware to recompute it is in the case of TSO.
1778                  */
1779                 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1780                         cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1781                 else
1782                         cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1783 
1784         } else if (protocol == htons(ETH_P_IPV6)) {
1785                 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1786                 exthdr = ip.hdr + sizeof(*ip.v6);
1787                 l4_proto = ip.v6->nexthdr;
1788                 if (l4.hdr != exthdr)
1789                         ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1790                                          &frag_off);
1791         } else {
1792                 return -1;
1793         }
1794 
1795         /* compute inner L3 header size */
1796         l3_len = l4.hdr - ip.hdr;
1797         offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1798 
1799         /* Enable L4 checksum offloads */
1800         switch (l4_proto) {
1801         case IPPROTO_TCP:
1802                 /* enable checksum offloads */
1803                 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1804                 l4_len = l4.tcp->doff;
1805                 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1806                 break;
1807         case IPPROTO_UDP:
1808                 /* enable UDP checksum offload */
1809                 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1810                 l4_len = (sizeof(struct udphdr) >> 2);
1811                 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1812                 break;
1813         case IPPROTO_SCTP:
1814                 /* enable SCTP checksum offload */
1815                 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1816                 l4_len = sizeof(struct sctphdr) >> 2;
1817                 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1818                 break;
1819 
1820         default:
1821                 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1822                         return -1;
1823                 skb_checksum_help(skb);
1824                 return 0;
1825         }
1826 
1827         off->td_cmd |= cmd;
1828         off->td_offset |= offset;
1829         return 1;
1830 }
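
/* Worked example for the offset word built above (plain IPv4/TCP with no
 * options, illustrative only): a 14 byte Ethernet header, 20 byte IPv4 header
 * and 20 byte TCP header become
 *
 *     MACLEN = 14 / 2 = 7  two-byte words  << ICE_TX_DESC_LEN_MACLEN_S
 *     IPLEN  = 20 / 4 = 5  four-byte words << ICE_TX_DESC_LEN_IPLEN_S
 *     L4LEN  = doff   = 5  four-byte words << ICE_TX_DESC_LEN_L4_LEN_S
 *
 * i.e. all three header lengths are carried in word units, not bytes, in the
 * descriptor's offset field.
 */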
1831 
1832 /**
1833  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1834  * @tx_ring: ring to send buffer on
1835  * @first: pointer to struct ice_tx_buf
1836  *
1837  * Checks the skb and sets up the corresponding generic transmit flags
1838  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1839  *
1840  * Returns an error code to indicate the frame should be dropped upon error;
1841  * otherwise returns 0 to indicate the flags have been set properly.
1842  */
1843 static int
1844 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1845 {
1846         struct sk_buff *skb = first->skb;
1847         __be16 protocol = skb->protocol;
1848 
1849         if (protocol == htons(ETH_P_8021Q) &&
1850             !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1851                 /* when HW VLAN acceleration is turned off by the user the
1852                  * stack sets the protocol to 8021q so that the driver
1853                  * can take any steps required to support the SW only
1854                  * VLAN handling. In our case the driver doesn't need
1855                  * to take any further steps so just set the protocol
1856                  * to the encapsulated ethertype.
1857                  */
1858                 skb->protocol = vlan_get_protocol(skb);
1859                 return 0;
1860         }
1861 
1862         /* if we have a HW VLAN tag being added, default to the HW one */
1863         if (skb_vlan_tag_present(skb)) {
1864                 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1865                 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1866         } else if (protocol == htons(ETH_P_8021Q)) {
1867                 struct vlan_hdr *vhdr, _vhdr;
1868 
1869                 /* for SW VLAN, check the next protocol and store the tag */
1870                 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1871                                                              sizeof(_vhdr),
1872                                                              &_vhdr);
1873                 if (!vhdr)
1874                         return -EINVAL;
1875 
1876                 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1877                                    ICE_TX_FLAGS_VLAN_S;
1878                 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1879         }
1880 
1881         return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1882 }
1883 
1884 /**
1885  * ice_tso - computes mss and TSO length to prepare for TSO
1886  * @first: pointer to struct ice_tx_buf
1887  * @off: pointer to struct that holds offload parameters
1888  *
1889  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1890  */
1891 static
1892 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1893 {
1894         struct sk_buff *skb = first->skb;
1895         union {
1896                 struct iphdr *v4;
1897                 struct ipv6hdr *v6;
1898                 unsigned char *hdr;
1899         } ip;
1900         union {
1901                 struct tcphdr *tcp;
1902                 unsigned char *hdr;
1903         } l4;
1904         u64 cd_mss, cd_tso_len;
1905         u32 paylen, l4_start;
1906         int err;
1907 
1908         if (skb->ip_summed != CHECKSUM_PARTIAL)
1909                 return 0;
1910 
1911         if (!skb_is_gso(skb))
1912                 return 0;
1913 
1914         err = skb_cow_head(skb, 0);
1915         if (err < 0)
1916                 return err;
1917 
1918         /* cppcheck-suppress unreadVariable */
1919         ip.hdr = skb_network_header(skb);
1920         l4.hdr = skb_transport_header(skb);
1921 
1922         /* initialize outer IP header fields */
1923         if (ip.v4->version == 4) {
1924                 ip.v4->tot_len = 0;
1925                 ip.v4->check = 0;
1926         } else {
1927                 ip.v6->payload_len = 0;
1928         }
1929 
1930         /* determine offset of transport header */
1931         l4_start = l4.hdr - skb->data;
1932 
1933         /* remove payload length from checksum */
1934         paylen = skb->len - l4_start;
1935         csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1936 
1937         /* compute length of segmentation header */
1938         off->header_len = (l4.tcp->doff * 4) + l4_start;
1939 
1940         /* update gso_segs and bytecount */
1941         first->gso_segs = skb_shinfo(skb)->gso_segs;
1942         first->bytecount += (first->gso_segs - 1) * off->header_len;
1943 
1944         cd_tso_len = skb->len - off->header_len;
1945         cd_mss = skb_shinfo(skb)->gso_size;
1946 
1947         /* record cdesc_qw1 with TSO parameters */
1948         off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1949                              (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1950                              (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1951                              (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1952         first->tx_flags |= ICE_TX_FLAGS_TSO;
1953         return 1;
1954 }
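
/* Worked example for the TSO bookkeeping above (illustrative numbers): a
 * 9014 byte skb carrying 14 + 20 + 20 = 54 bytes of headers with a gso_size
 * of 1448 gives
 *
 *     off->header_len   = (5 * 4) + 34        = 54
 *     cd_tso_len        = 9014 - 54           = 8960 payload bytes
 *     first->gso_segs   = ceil(8960 / 1448)   = 7 wire segments
 *     first->bytecount += (7 - 1) * 54        = 324 replicated header bytes
 *
 * so BQL and the Tx stats account for the bytes that will actually hit the
 * wire rather than just the length of the original skb.
 */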
1955 
1956 /**
1957  * ice_txd_use_count  - estimate the number of descriptors needed for Tx
1958  * @size: transmit request size in bytes
1959  *
1960  * Due to hardware alignment restrictions (4K alignment), we need to
1961  * assume that we can have no more than 12K of data per descriptor, even
1962  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1963  * Thus, we need to divide by 12K. But division is slow! Instead,
1964  * we decompose the operation into shifts and one relatively cheap
1965  * multiply operation.
1966  *
1967  * To divide by 12K, we first divide by 4K, then divide by 3:
1968  *     To divide by 4K, shift right by 12 bits
1969  *     To divide by 3, multiply by 85, then divide by 256
1970  *     (Divide by 256 is done by shifting right by 8 bits)
1971  * Finally, we add one to round up. Because 256 isn't an exact multiple of
1972  * 3, we'll underestimate near each multiple of 12K. This is actually more
1973  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1974  * segment. For our purposes this is accurate out to 1M which is orders of
1975  * magnitude greater than our largest possible GSO size.
1976  *
1977  * This would then be implemented as:
1978  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1979  *
1980  * Since multiplication and division are commutative, we can reorder
1981  * operations into:
1982  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1983  */
1984 static unsigned int ice_txd_use_count(unsigned int size)
1985 {
1986         return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1987 }
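
/* Quick arithmetic check of the shortcut above (assuming, as the "add one to
 * round up" wording implies, that ICE_DESCS_FOR_SKB_DATA_PTR is 1):
 *
 *     size = 12288:  (12288 * 85) >> 20 = 0, + 1 -> 1 descriptor
 *     size = 12289:  (12289 * 85) >> 20 = 0, + 1 -> 1 descriptor
 *     size = 32768:  (32768 * 85) >> 20 = 2, + 1 -> 3 descriptors
 *
 * The 12289 case shows the intentional underestimate just past a 12K
 * multiple, which the 4K - 1 bytes of slack in the last segment absorbs.
 */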
1988 
1989 /**
1990  * ice_xmit_desc_count - calculate number of Tx descriptors needed
1991  * @skb: send buffer
1992  *
1993  * Returns number of data descriptors needed for this skb.
1994  */
1995 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1996 {
1997         const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1998         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1999         unsigned int count = 0, size = skb_headlen(skb);
2000 
2001         for (;;) {
2002                 count += ice_txd_use_count(size);
2003 
2004                 if (!nr_frags--)
2005                         break;
2006 
2007                 size = skb_frag_size(frag++);
2008         }
2009 
2010         return count;
2011 }
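
/* Illustrative count for the loop above: an skb with a 256 byte linear head
 * and three 4K page fragments gives ice_txd_use_count() == 1 for each of the
 * four pieces, i.e. 4 data descriptors, since every piece is well under the
 * 12K per-descriptor budget.
 */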
2012 
2013 /**
2014  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2015  * @skb: send buffer
2016  *
2017  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2018  * and so we need to figure out the cases where we need to linearize the skb.
2019  *
2020  * For TSO we need to count the TSO header and segment payload separately.
2021  * As such we need to check cases where we have 7 fragments or more as we
2022  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2023  * the segment payload in the first descriptor, and another 7 for the
2024  * fragments.
2025  */
2026 static bool __ice_chk_linearize(struct sk_buff *skb)
2027 {
2028         const skb_frag_t *frag, *stale;
2029         int nr_frags, sum;
2030 
2031         /* no need to check if number of frags is less than 7 */
2032         nr_frags = skb_shinfo(skb)->nr_frags;
2033         if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2034                 return false;
2035 
2036         /* We need to walk through the list and validate that each group
2037          * of 6 fragments totals at least gso_size.
2038          */
2039         nr_frags -= ICE_MAX_BUF_TXD - 2;
2040         frag = &skb_shinfo(skb)->frags[0];
2041 
2042         /* Initialize sum to the negative value of gso_size minus 1. We
2043          * use this as the worst case scenario in which the frag ahead
2044          * of us only provides one byte, which is why we are limited to 6
2045          * descriptors for a single transmit as the header and previous
2046          * fragment are already consuming 2 descriptors.
2047          */
2048         sum = 1 - skb_shinfo(skb)->gso_size;
2049 
2050         /* Add size of frags 0 through 4 to create our initial sum */
2051         sum += skb_frag_size(frag++);
2052         sum += skb_frag_size(frag++);
2053         sum += skb_frag_size(frag++);
2054         sum += skb_frag_size(frag++);
2055         sum += skb_frag_size(frag++);
2056 
2057         /* Walk through fragments adding latest fragment, testing it, and
2058          * then removing stale fragments from the sum.
2059          */
2060         stale = &skb_shinfo(skb)->frags[0];
2061         for (;;) {
2062                 sum += skb_frag_size(frag++);
2063 
2064                 /* if sum is negative we failed to make sufficient progress */
2065                 if (sum < 0)
2066                         return true;
2067 
2068                 if (!nr_frags--)
2069                         break;
2070 
2071                 sum -= skb_frag_size(stale++);
2072         }
2073 
2074         return false;
2075 }
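
/* Worked example for the sliding-window test above (illustrative sizes,
 * gso_size = 2000): with 100 byte frags the initial five-frag window sums to
 * 1 - 2000 + 5 * 100 = -1499, the first added frag only reaches -1399, the
 * sum is still negative and the skb is linearized. With 512 byte frags the
 * first full window reaches 1 - 2000 + 6 * 512 = 1073 > 0 and the walk keeps
 * sliding, dropping the stale frag each step, so no linearization is needed
 * as long as every group of six frags covers at least one gso_size of data.
 */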
2076 
2077 /**
2078  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2079  * @skb:      send buffer
2080  * @count:    number of buffers used
2081  *
2082  * Note: Our HW can't scatter-gather more than 8 fragments to build
2083  * a packet on the wire and so we need to figure out the cases where we
2084  * need to linearize the skb.
2085  */
2086 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2087 {
2088         /* Both TSO and single send will work if count is less than 8 */
2089         if (likely(count < ICE_MAX_BUF_TXD))
2090                 return false;
2091 
2092         if (skb_is_gso(skb))
2093                 return __ice_chk_linearize(skb);
2094 
2095         /* we can support up to 8 data buffers for a single send */
2096         return count != ICE_MAX_BUF_TXD;
2097 }
2098 
2099 /**
2100  * ice_xmit_frame_ring - Sends buffer on Tx ring
2101  * @skb: send buffer
2102  * @tx_ring: ring to send buffer on
2103  *
2104  * Returns NETDEV_TX_OK if sent, else an error code
2105  */
2106 static netdev_tx_t
2107 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2108 {
2109         struct ice_tx_offload_params offload = { 0 };
2110         struct ice_vsi *vsi = tx_ring->vsi;
2111         struct ice_tx_buf *first;
2112         unsigned int count;
2113         int tso, csum;
2114 
2115         count = ice_xmit_desc_count(skb);
2116         if (ice_chk_linearize(skb, count)) {
2117                 if (__skb_linearize(skb))
2118                         goto out_drop;
2119                 count = ice_txd_use_count(skb->len);
2120                 tx_ring->tx_stats.tx_linearize++;
2121         }
2122 
2123         /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2124          *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2125          *       + 4 desc gap to avoid the cache line where head is,
2126          *       + 1 desc for context descriptor,
2127          * otherwise try next time
2128          */
2129         if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2130                               ICE_DESCS_FOR_CTX_DESC)) {
2131                 tx_ring->tx_stats.tx_busy++;
2132                 return NETDEV_TX_BUSY;
2133         }
2134 
2135         offload.tx_ring = tx_ring;
2136 
2137         /* record the location of the first descriptor for this packet */
2138         first = &tx_ring->tx_buf[tx_ring->next_to_use];
2139         first->skb = skb;
2140         first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2141         first->gso_segs = 1;
2142         first->tx_flags = 0;
2143 
2144         /* prepare the VLAN tagging flags for Tx */
2145         if (ice_tx_prepare_vlan_flags(tx_ring, first))
2146                 goto out_drop;
2147 
2148         /* set up TSO offload */
2149         tso = ice_tso(first, &offload);
2150         if (tso < 0)
2151                 goto out_drop;
2152 
2153         /* always set up Tx checksum offload */
2154         csum = ice_tx_csum(first, &offload);
2155         if (csum < 0)
2156                 goto out_drop;
2157 
2158         /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2159         if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2160                      vsi->type == ICE_VSI_PF &&
2161                      vsi->port_info->is_sw_lldp))
2162                 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2163                                         ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2164                                         ICE_TXD_CTX_QW1_CMD_S);
2165 
2166         if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2167                 struct ice_tx_ctx_desc *cdesc;
2168                 int i = tx_ring->next_to_use;
2169 
2170                 /* grab the next descriptor */
2171                 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2172                 i++;
2173                 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2174 
2175                 /* setup context descriptor */
2176                 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2177                 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2178                 cdesc->rsvd = cpu_to_le16(0);
2179                 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2180         }
2181 
2182         ice_tx_map(tx_ring, first, &offload);
2183         return NETDEV_TX_OK;
2184 
2185 out_drop:
2186         dev_kfree_skb_any(skb);
2187         return NETDEV_TX_OK;
2188 }
2189 
2190 /**
2191  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2192  * @skb: send buffer
2193  * @netdev: network interface device structure
2194  *
2195  * Returns NETDEV_TX_OK if sent, else an error code
2196  */
2197 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2198 {
2199         struct ice_netdev_priv *np = netdev_priv(netdev);
2200         struct ice_vsi *vsi = np->vsi;
2201         struct ice_ring *tx_ring;
2202 
2203         tx_ring = vsi->tx_rings[skb->queue_mapping];
2204 
2205         /* hardware can't handle really short frames; hardware padding works
2206          * beyond this point
2207          */
2208         if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2209                 return NETDEV_TX_OK;
2210 
2211         return ice_xmit_frame_ring(skb, tx_ring);
2212 }
