Lines matching refs: rx_ring (drivers/net/ethernet/intel/ixgbe/ixgbe_main.c)

558 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
697 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
699 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
754 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
756 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
767 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
768 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
769 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
793 ixgbe_rx_bufsz(rx_ring), true); in ixgbe_dump()
797 if (i == rx_ring->next_to_use) in ixgbe_dump()
799 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
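The ixgbe_dump() fragments above walk every descriptor in each RX ring and flag the slots at next_to_use (NTU) and next_to_clean (NTC). A minimal sketch of that marker logic, assuming the ring fields shown above; the legend strings and print format are illustrative, not the driver's exact output:

    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *rx_buffer_info;
    u32 i;

    for (i = 0; i < rx_ring->count; i++) {
    	const char *mark = "";

    	rx_buffer_info = &rx_ring->rx_buffer_info[i];
    	rx_desc = IXGBE_RX_DESC(rx_ring, i);

    	if (i == rx_ring->next_to_use)
    		mark = " NTU";	/* next slot software hands to hardware */
    	else if (i == rx_ring->next_to_clean)
    		mark = " NTC";	/* next slot software will reap */

    	pr_info("R [0x%03X]%s\n", i, mark);
    }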
1264 struct ixgbe_ring *rx_ring, in ixgbe_update_rx_dca() argument
1268 u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1269 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1455 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, in ixgbe_alloc_mapped_page() argument
1466 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1468 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1473 dma = dma_map_page(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1474 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_alloc_mapped_page()
1480 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1481 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1483 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
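ixgbe_alloc_mapped_page() follows the standard allocate-map-verify pattern: grab a page of the ring's configured order, DMA-map it for device writes, and check the mapping before handing it to hardware. A sketch reconstructed from the fragments above (the page_offset initialization is an assumption, and the real function also bails out early if the buffer already holds a page):

    static bool sketch_alloc_mapped_page(struct ixgbe_ring *rx_ring,
    				     struct ixgbe_rx_buffer *bi)
    {
    	struct page *page;
    	dma_addr_t dma;

    	/* order comes from the ring's configured buffer size */
    	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
    	if (unlikely(!page)) {
    		rx_ring->rx_stats.alloc_rx_page_failed++;
    		return false;
    	}

    	dma = dma_map_page(rx_ring->dev, page, 0,
    			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

    	/* a failed mapping must release the page, or it leaks */
    	if (dma_mapping_error(rx_ring->dev, dma)) {
    		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
    		rx_ring->rx_stats.alloc_rx_page_failed++;
    		return false;
    	}

    	bi->dma = dma;
    	bi->page = page;
    	bi->page_offset = 0;	/* assumption: fresh pages start at offset 0 */
    	return true;
    }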
1499 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) in ixgbe_alloc_rx_buffers() argument
1503 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1509 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers()
1510 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1511 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1514 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) in ixgbe_alloc_rx_buffers()
1527 rx_desc = IXGBE_RX_DESC(rx_ring, 0); in ixgbe_alloc_rx_buffers()
1528 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1529 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1538 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1540 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1541 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1544 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1552 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
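The "i -= rx_ring->count" at line 1511 is the driver's wrap idiom: the index is biased by -count so the end-of-ring test is a cheap "if (!i)" after each unsigned increment, and the real index is recovered by adding count back. The writel() at line 1552 publishes the new next_to_use to the NIC's tail register and must be preceded by a write barrier so the device never sees the tail move ahead of the descriptor contents. A hedged sketch of the loop skeleton, with per-descriptor fill elided:

    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *bi;
    u16 i = rx_ring->next_to_use;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_buffer_info[i];
    i -= rx_ring->count;		/* bias so that i == 0 means "wrapped" */

    do {
    	/* ... allocate/map a page and fill one descriptor ... */
    	rx_desc++;
    	bi++;
    	i++;
    	if (unlikely(!i)) {		/* hit the end of the ring */
    		rx_desc = IXGBE_RX_DESC(rx_ring, 0);
    		bi = rx_ring->rx_buffer_info;
    		i -= rx_ring->count;
    	}
    } while (--cleaned_count);

    i += rx_ring->count;		/* back to a real ring index */

    if (rx_ring->next_to_use != i) {
    	rx_ring->next_to_use = i;
    	/* order descriptor writes before the tail bump the NIC polls;
    	 * the driver of this era used wmb() here */
    	wmb();
    	writel(i, rx_ring->tail);
    }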
1567 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, in ixgbe_update_rsc_stats() argument
1574 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1575 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1577 ixgbe_set_rsc_gso_size(rx_ring, skb); in ixgbe_update_rsc_stats()
1593 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, in ixgbe_process_skb_fields() argument
1597 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1599 ixgbe_update_rsc_stats(rx_ring, skb); in ixgbe_process_skb_fields()
1601 ixgbe_rx_hash(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1603 ixgbe_rx_checksum(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1606 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); in ixgbe_process_skb_fields()
1614 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
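ixgbe_process_skb_fields() populates the skb from the completed descriptor in a fixed order, as the fragments show. A condensed sketch of that order (conditional guards, such as the timestamp status check before the PTP call, are elided here):

    /* population order for one completed frame's skb fields */
    ixgbe_update_rsc_stats(rx_ring, skb);		/* RSC/LRO gso bookkeeping */
    ixgbe_rx_hash(rx_ring, rx_desc, skb);		/* RSS hash into the skb */
    ixgbe_rx_checksum(rx_ring, rx_desc, skb);	/* hardware checksum verdict */
    ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);	/* PTP stamp */
    skb_record_rx_queue(skb, rx_ring->queue_index);	/* source queue for the stack */
    skb->protocol = eth_type_trans(skb, rx_ring->netdev);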
1639 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, in ixgbe_is_non_eop() argument
1643 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1646 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1647 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1649 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_is_non_eop()
1652 if (ring_is_rsc_enabled(rx_ring)) { in ixgbe_is_non_eop()
1674 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1675 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
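ixgbe_is_non_eop() advances next_to_clean unconditionally, prefetches the next descriptor, and returns true when the end-of-packet bit is clear, parking the in-progress skb in the next buffer slot so the poll loop can resume the frame. A sketch reconstructed from the fragments, with the RSC branch at line 1652 elided:

    static bool sketch_is_non_eop(struct ixgbe_ring *rx_ring,
    			      union ixgbe_adv_rx_desc *rx_desc,
    			      struct sk_buff *skb)
    {
    	u32 ntc = rx_ring->next_to_clean + 1;

    	ntc = (ntc < rx_ring->count) ? ntc : 0;	/* wrap without a modulo */
    	rx_ring->next_to_clean = ntc;

    	prefetch(IXGBE_RX_DESC(rx_ring, ntc));	/* warm the next descriptor */

    	/* EOP set: the frame is complete in this descriptor chain */
    	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
    		return false;

    	/* multi-descriptor frame: park the skb for the next iteration */
    	rx_ring->rx_buffer_info[ntc].skb = skb;
    	rx_ring->rx_stats.non_eop_descs++;
    	return true;
    }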
1692 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, in ixgbe_pull_tail() argument
1732 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, in ixgbe_dma_sync_frag() argument
1737 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1738 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_dma_sync_frag()
1743 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1746 ixgbe_rx_bufsz(rx_ring), in ixgbe_dma_sync_frag()
1770 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, in ixgbe_cleanup_headers() argument
1774 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1786 ixgbe_pull_tail(rx_ring, skb); in ixgbe_cleanup_headers()
1790 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) in ixgbe_cleanup_headers()
1808 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_page() argument
1812 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1814 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1818 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
1824 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, in ixgbe_reuse_rx_page()
1826 ixgbe_rx_bufsz(rx_ring), in ixgbe_reuse_rx_page()
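ixgbe_reuse_rx_page() recycles a half-used page into the slot at next_to_alloc instead of allocating afresh: the new buffer inherits the existing DMA mapping, so only the half the hardware will write next needs syncing back to the device. A sketch under those assumptions:

    static void sketch_reuse_rx_page(struct ixgbe_ring *rx_ring,
    				 struct ixgbe_rx_buffer *old_buff)
    {
    	struct ixgbe_rx_buffer *new_buff;
    	u16 nta = rx_ring->next_to_alloc;

    	new_buff = &rx_ring->rx_buffer_info[nta];

    	/* advance the alloc cursor, wrapping at the end of the ring */
    	nta++;
    	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    	/* inherit the existing mapping: page, DMA address, offset */
    	*new_buff = *old_buff;

    	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
    					 new_buff->page_offset,
    					 ixgbe_rx_bufsz(rx_ring),
    					 DMA_FROM_DEVICE);
    }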
1850 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, in ixgbe_add_rx_frag() argument
1858 unsigned int truesize = ixgbe_rx_bufsz(rx_ring); in ixgbe_add_rx_frag()
1861 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - in ixgbe_add_rx_frag()
1862 ixgbe_rx_bufsz(rx_ring); in ixgbe_add_rx_frag()
1875 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_add_rx_frag()
1909 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_fetch_rx_buffer() argument
1916 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_fetch_rx_buffer()
1933 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in ixgbe_fetch_rx_buffer()
1936 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_fetch_rx_buffer()
1959 ixgbe_dma_sync_frag(rx_ring, skb); in ixgbe_fetch_rx_buffer()
1963 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_fetch_rx_buffer()
1966 ixgbe_rx_bufsz(rx_ring), in ixgbe_fetch_rx_buffer()
1973 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { in ixgbe_fetch_rx_buffer()
1975 ixgbe_reuse_rx_page(rx_ring, rx_buffer); in ixgbe_fetch_rx_buffer()
1981 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in ixgbe_fetch_rx_buffer()
1982 ixgbe_rx_pg_size(rx_ring), in ixgbe_fetch_rx_buffer()
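ixgbe_fetch_rx_buffer() ties these pieces together: sync the buffer for the CPU, attach it to an skb via ixgbe_add_rx_frag(), then either recycle the page (the reuse path above) or unmap it once the network stack owns it. A sketch of that decision, assuming rx_desc and skb are in scope as in the poll loop; the skb-allocation and RSC branches at lines 1933 and 1959 are elided:

    struct ixgbe_rx_buffer *rx_buffer;

    rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

    /* hand the buffer to the CPU before the stack reads it */
    dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
    			      rx_buffer->page_offset,
    			      ixgbe_rx_bufsz(rx_ring),
    			      DMA_FROM_DEVICE);

    if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
    	/* the page can serve another buffer: recycle it (see above) */
    	ixgbe_reuse_rx_page(rx_ring, rx_buffer);
    } else {
    	/* the stack owns the page now: drop our DMA mapping */
    	dma_unmap_page(rx_ring->dev, rx_buffer->dma,
    		       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
    }

    rx_buffer->page = NULL;		/* slot stays empty until refilled */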
2006 struct ixgbe_ring *rx_ring, in ixgbe_clean_rx_irq() argument
2015 u16 cleaned_count = ixgbe_desc_unused(rx_ring); in ixgbe_clean_rx_irq()
2023 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbe_clean_rx_irq()
2027 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2039 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); in ixgbe_clean_rx_irq()
2048 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2052 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2059 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); in ixgbe_clean_rx_irq()
2063 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { in ixgbe_clean_rx_irq()
2068 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2093 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2094 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2095 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2096 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
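The totals at lines 2093-2096 are published under the ring's u64_stats_sync so that 64-bit counters read coherently on 32-bit kernels: writers pair update_begin/update_end as shown, and readers spin on a fetch_begin/fetch_retry loop. A sketch of the reader side (this driver era actually used the _irq variants of the fetch helpers; on 64-bit kernels these compile away entirely):

    unsigned int start;
    u64 packets, bytes;

    do {
    	start = u64_stats_fetch_begin(&rx_ring->syncp);
    	packets = rx_ring->stats.packets;
    	bytes   = rx_ring->stats.bytes;
    } while (u64_stats_fetch_retry(&rx_ring->syncp, start));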
3193 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3196 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3203 struct ixgbe_ring *rx_ring) in ixgbe_configure_srrctl() argument
3207 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3223 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; in ixgbe_configure_srrctl()
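The shift at line 3223 exists because SRRCTL's BSIZEPKT field is expressed in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in the driver headers), so a 2 KB buffer encodes as 2. A sketch of the register assembly, assuming hw points at the adapter's struct ixgbe_hw as elsewhere in the driver; drop-enable and header-split bits are elided:

    u32 srrctl;
    u8 reg_idx = rx_ring->reg_idx;

    srrctl = ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;	/* advanced one-buffer mode */
    IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);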
3673 struct ixgbe_ring *rx_ring; in ixgbe_set_rx_buffer_len() local
3707 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
3709 set_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
3711 clear_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
3790 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
3849 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
3885 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
4437 adapter->rx_ring[filter->action]->reg_idx); in ixgbe_fdir_filter_restore()
4493 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) in ixgbe_clean_rx_ring() argument
4495 struct device *dev = rx_ring->dev; in ixgbe_clean_rx_ring()
4500 if (!rx_ring->rx_buffer_info) in ixgbe_clean_rx_ring()
4504 for (i = 0; i < rx_ring->count; i++) { in ixgbe_clean_rx_ring()
4505 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
4512 ixgbe_rx_bufsz(rx_ring), in ixgbe_clean_rx_ring()
4522 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_clean_rx_ring()
4523 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_clean_rx_ring()
4528 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_clean_rx_ring()
4529 memset(rx_ring->rx_buffer_info, 0, size); in ixgbe_clean_rx_ring()
4532 memset(rx_ring->desc, 0, rx_ring->size); in ixgbe_clean_rx_ring()
4534 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
4535 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
4536 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
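ixgbe_clean_rx_ring() is the teardown mirror of the alloc path: free any parked skb, unmap and free each page, then zero the buffer bookkeeping, the descriptor memory, and all three ring cursors. A sketch of the per-slot teardown inside the loop at line 4504, with the CPU-side dma_sync before the skb free elided:

    for (i = 0; i < rx_ring->count; i++) {
    	struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

    	if (rx_buffer->skb) {	/* a parked, partially received frame */
    		dev_kfree_skb(rx_buffer->skb);
    		rx_buffer->skb = NULL;
    	}

    	if (!rx_buffer->page)
    		continue;

    	dma_unmap_page(dev, rx_buffer->dma,
    		       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
    	__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
    	rx_buffer->page = NULL;
    }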
4540 struct ixgbe_ring *rx_ring) in ixgbe_disable_fwd_ring() argument
4543 int index = rx_ring->queue_index + vadapter->rx_base_queue; in ixgbe_disable_fwd_ring()
4546 ixgbe_disable_rx_queue(adapter, rx_ring); in ixgbe_disable_fwd_ring()
4549 ixgbe_clean_rx_ring(rx_ring); in ixgbe_disable_fwd_ring()
4550 rx_ring->l2_accel_priv = NULL; in ixgbe_disable_fwd_ring()
4564 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_down()
4565 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4598 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
4601 adapter->rx_ring[rxbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4602 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
4603 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
5038 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
5088 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbe_down()
5461 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_setup_rx_resources() argument
5463 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
5468 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
5470 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
5471 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
5473 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
5474 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
5475 rx_ring->rx_buffer_info = vzalloc(size); in ixgbe_setup_rx_resources()
5476 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
5479 u64_stats_init(&rx_ring->syncp); in ixgbe_setup_rx_resources()
5482 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
5483 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
5486 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
5487 rx_ring->size, in ixgbe_setup_rx_resources()
5488 &rx_ring->dma, in ixgbe_setup_rx_resources()
5491 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
5492 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
5493 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
5494 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
5497 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
5498 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
5502 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
5503 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
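ixgbe_setup_rx_resources() uses a NUMA-then-anywhere fallback for both allocations: try vzalloc_node() and dma_alloc_coherent() on the q_vector's node first, retry without the node constraint, and unwind the vzalloc if the descriptor ring still fails. The descriptor memory is rounded up to a 4 KB multiple (line 5483). A sketch of the allocation sequence, following the driver's set_dev_node() technique for steering the coherent allocation; error unwinding is elided:

    struct device *dev = rx_ring->dev;
    int orig_node = dev_to_node(dev);
    int ring_node = NUMA_NO_NODE;
    int size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

    if (rx_ring->q_vector)
    	ring_node = rx_ring->q_vector->numa_node;

    /* buffer bookkeeping: prefer the queue's NUMA node, fall back anywhere */
    rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
    if (!rx_ring->rx_buffer_info)
    	rx_ring->rx_buffer_info = vzalloc(size);

    /* descriptor ring: one advanced descriptor per slot, 4 KB aligned */
    rx_ring->size = ALIGN(rx_ring->count * sizeof(union ixgbe_adv_rx_desc),
    		      4096);

    set_dev_node(dev, ring_node);
    rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
    				   &rx_ring->dma, GFP_KERNEL);
    set_dev_node(dev, orig_node);
    if (!rx_ring->desc)		/* retry without the node preference */
    	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
    					   &rx_ring->dma, GFP_KERNEL);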
5523 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5539 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5587 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_free_rx_resources() argument
5589 ixgbe_clean_rx_ring(rx_ring); in ixgbe_free_rx_resources()
5591 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
5592 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
5595 if (!rx_ring->desc) in ixgbe_free_rx_resources()
5598 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
5599 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
5601 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
5619 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
5620 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
5957 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
5958 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
5965 struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; in ixgbe_update_stats() local
5966 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
5967 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
5968 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
5969 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
5970 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
5971 packets += rx_ring->stats.packets; in ixgbe_update_stats()
7580 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
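ixgbe_get_stats64() reads each ring pointer through ACCESS_ONCE because the ring array can be torn down concurrently with a stats query; a NULL check then guards the dereference, and the per-ring counters are snapshotted with the u64_stats reader loop shown earlier. (ACCESS_ONCE was later replaced kernel-wide by READ_ONCE.) A sketch of the aggregation, assuming a struct rtnl_link_stats64 *stats output argument as in the ndo callback:

    for (i = 0; i < adapter->num_rx_queues; i++) {
    	struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
    	unsigned int start;
    	u64 packets, bytes;

    	if (!ring)		/* the ring may vanish under us */
    		continue;

    	do {
    		start = u64_stats_fetch_begin(&ring->syncp);
    		packets = ring->stats.packets;
    		bytes   = ring->stats.bytes;
    	} while (u64_stats_fetch_retry(&ring->syncp, start));

    	stats->rx_packets += packets;
    	stats->rx_bytes   += bytes;
    }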