Lines matching refs: rx_ring
455 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, in i40e_fd_handle_status() argument
458 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
975 static void i40e_clean_programming_status(struct i40e_ring *rx_ring, in i40e_clean_programming_status() argument
986 i40e_fd_handle_status(rx_ring, rx_desc, id); in i40e_clean_programming_status()
990 i40e_fcoe_handle_status(rx_ring, rx_desc, id); in i40e_clean_programming_status()
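The two clusters above are the Rx programming-status path: i40e_clean_programming_status() pulls the program id out of the descriptor's status qword and dispatches to the flow-director handler (and, in FCoE builds, the FCoE handler); i40e_fd_handle_status() then reaches the PF through rx_ring->vsi->back. A minimal sketch of that dispatch, assuming the shift/mask and status-id macro names from the i40e headers:

    /* Sketch: decode the program id from a programming-status Rx
     * descriptor and dispatch on it. The PROGID/FD/FCOE macro names
     * are assumed from the i40e register headers. */
    static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
                                              union i40e_rx_desc *rx_desc)
    {
        u64 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        u8 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

        if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
            i40e_fd_handle_status(rx_ring, rx_desc, id);
    #ifdef I40E_FCOE
        else if (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS)
            i40e_fcoe_handle_status(rx_ring, rx_desc, id);
    #endif
    }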
1042 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) in i40e_clean_rx_ring() argument
1044 struct device *dev = rx_ring->dev; in i40e_clean_rx_ring()
1050 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1053 if (ring_is_ps_enabled(rx_ring)) { in i40e_clean_rx_ring()
1054 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; in i40e_clean_rx_ring()
1056 rx_bi = &rx_ring->rx_bi[0]; in i40e_clean_rx_ring()
1062 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1063 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_ring()
1070 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1071 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_ring()
1075 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1097 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40e_clean_rx_ring()
1098 memset(rx_ring->rx_bi, 0, bi_size); in i40e_clean_rx_ring()
1101 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1103 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1104 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
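i40e_clean_rx_ring() releases whatever the ring currently holds. In packet-split mode all header buffers came from one dma_alloc_coherent() chunk of ALIGN(rx_hdr_len, 256) * count bytes, so only rx_bi[0] carries the base address to free; skbs and pages are then unmapped and freed per buffer, the bookkeeping array and descriptor memory are zeroed, and the indices reset. A condensed sketch of that teardown (per-buffer pointer zeroing and the skb DMA unmap are elided):

    /* Sketch: release Rx buffers and reset the ring; condensed from
     * the listing above, not the full function. */
    void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
    {
        struct device *dev = rx_ring->dev;
        struct i40e_rx_buffer *rx_bi;
        u16 i;

        if (!rx_ring->rx_bi)
            return;

        if (ring_is_ps_enabled(rx_ring)) {
            /* all header buffers live in one coherent chunk */
            int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

            rx_bi = &rx_ring->rx_bi[0];
            if (rx_bi->hdr_buf)
                dma_free_coherent(dev, bufsz, rx_bi->hdr_buf, rx_bi->dma);
        }

        for (i = 0; i < rx_ring->count; i++) {
            rx_bi = &rx_ring->rx_bi[i];
            if (rx_bi->skb) {
                dev_kfree_skb(rx_bi->skb);
                rx_bi->skb = NULL;
            }
            if (rx_bi->page) {
                dma_unmap_page(dev, rx_bi->page_dma,
                               PAGE_SIZE / 2, DMA_FROM_DEVICE);
                __free_page(rx_bi->page);
                rx_bi->page = NULL;
            }
        }

        memset(rx_ring->rx_bi, 0,
               sizeof(struct i40e_rx_buffer) * rx_ring->count);
        memset(rx_ring->desc, 0, rx_ring->size);
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
    }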
1113 void i40e_free_rx_resources(struct i40e_ring *rx_ring) in i40e_free_rx_resources() argument
1115 i40e_clean_rx_ring(rx_ring); in i40e_free_rx_resources()
1116 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1117 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1119 if (rx_ring->desc) { in i40e_free_rx_resources()
1120 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1121 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1122 rx_ring->desc = NULL; in i40e_free_rx_resources()
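Teardown order matters in i40e_free_rx_resources(): drain the ring first, then free the rx_bi bookkeeping array, and only then return the descriptor memory, NULL-ing each pointer so a repeated call is harmless. The sketch below simply mirrors the listed lines:

    /* Sketch: full Rx resource teardown, as in the listing above. */
    void i40e_free_rx_resources(struct i40e_ring *rx_ring)
    {
        i40e_clean_rx_ring(rx_ring);      /* unmap/free live buffers */
        kfree(rx_ring->rx_bi);            /* bookkeeping array */
        rx_ring->rx_bi = NULL;

        if (rx_ring->desc) {              /* descriptor DMA memory */
            dma_free_coherent(rx_ring->dev, rx_ring->size,
                              rx_ring->desc, rx_ring->dma);
            rx_ring->desc = NULL;
        }
    }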
1133 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) in i40e_alloc_rx_headers() argument
1135 struct device *dev = rx_ring->dev; in i40e_alloc_rx_headers()
1142 if (rx_ring->rx_bi[0].hdr_buf) in i40e_alloc_rx_headers()
1145 buf_size = ALIGN(rx_ring->rx_hdr_len, 256); in i40e_alloc_rx_headers()
1146 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, in i40e_alloc_rx_headers()
1150 for (i = 0; i < rx_ring->count; i++) { in i40e_alloc_rx_headers()
1151 rx_bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_headers()
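i40e_alloc_rx_headers() makes a single dma_alloc_coherent() call for all header buffers and carves it up: rx_bi[i] gets hdr_buf = buffer + i * buf_size and dma = dma + i * buf_size, with buf_size rounded up to 256 bytes. A sketch of the carve-up; the 'dma' local is an assumption, but the loop body is what the single free in i40e_clean_rx_ring() above implies:

    /* Sketch: one coherent allocation carved into per-descriptor
     * header buffers. */
    void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
    {
        struct device *dev = rx_ring->dev;
        struct i40e_rx_buffer *rx_bi;
        dma_addr_t dma;
        void *buffer;
        int buf_size;
        int i;

        if (rx_ring->rx_bi[0].hdr_buf)
            return;                       /* already allocated */

        buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
        buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
                                    &dma, GFP_KERNEL);
        if (!buffer)
            return;

        for (i = 0; i < rx_ring->count; i++) {
            rx_bi = &rx_ring->rx_bi[i];
            rx_bi->dma = dma + (i * buf_size);
            rx_bi->hdr_buf = buffer + (i * buf_size);
        }
    }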
1163 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40e_setup_rx_descriptors() argument
1165 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1168 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40e_setup_rx_descriptors()
1169 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_rx_descriptors()
1170 if (!rx_ring->rx_bi) in i40e_setup_rx_descriptors()
1173 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1176 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) in i40e_setup_rx_descriptors()
1177 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) in i40e_setup_rx_descriptors()
1178 : rx_ring->count * sizeof(union i40e_32byte_rx_desc); in i40e_setup_rx_descriptors()
1179 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1180 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1181 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1183 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1185 rx_ring->size); in i40e_setup_rx_descriptors()
1189 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1190 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1194 kfree(rx_ring->rx_bi); in i40e_setup_rx_descriptors()
1195 rx_ring->rx_bi = NULL; in i40e_setup_rx_descriptors()
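Setup is the mirror image: kzalloc() the rx_bi array, size the descriptor ring by count times the 16- or 32-byte descriptor format, round up to a 4 KiB page, and dma_alloc_coherent() it; on failure the rx_bi array is freed again so the caller sees a clean -ENOMEM. Condensed sketch (the dev_info() on allocation failure is elided):

    /* Sketch: Rx descriptor/bookkeeping allocation, condensed from
     * the listing above. */
    int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
    {
        struct device *dev = rx_ring->dev;
        int bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;

        rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!rx_ring->rx_bi)
            return -ENOMEM;

        u64_stats_init(&rx_ring->syncp);

        /* round up to the nearest 4K page */
        rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
                        ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
                        : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc)
            goto err;

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return 0;
    err:
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;
        return -ENOMEM;
    }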
1204 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
1206 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1213 writel(val, rx_ring->tail); in i40e_release_rx_desc()
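i40e_release_rx_desc() is the tail bump: record the new next_to_use, then writel() it to the ring's tail register. The source puts a write barrier between the two so all descriptor writes reach memory before the hardware is told to fetch; the sketch keeps that ordering:

    /* Sketch: publish refilled descriptors to hardware. */
    static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
    {
        rx_ring->next_to_use = val;
        /* force memory writes to complete before letting h/w
         * know there are new descriptors to fetch */
        wmb();
        writel(val, rx_ring->tail);
    }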
1221 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers_ps() argument
1223 u16 i = rx_ring->next_to_use; in i40e_alloc_rx_buffers_ps()
1228 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers_ps()
1232 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_alloc_rx_buffers_ps()
1233 bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_buffers_ps()
1240 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_rx_buffers_ps()
1248 bi->page_dma = dma_map_page(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1253 if (dma_mapping_error(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1255 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_rx_buffers_ps()
1261 dma_sync_single_range_for_device(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1264 rx_ring->rx_hdr_len, in i40e_alloc_rx_buffers_ps()
1272 if (i == rx_ring->count) in i40e_alloc_rx_buffers_ps()
1277 if (rx_ring->next_to_use != i) in i40e_alloc_rx_buffers_ps()
1278 i40e_release_rx_desc(rx_ring, i); in i40e_alloc_rx_buffers_ps()
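The packet-split refill loop maps a half page of packet data per descriptor (the full driver shares each page between two descriptors via a page_offset toggle, elided here), re-syncs the pre-carved header buffer to the device, and on any failure bumps rx_stats.alloc_page_failed and bails so NAPI can retry. A trimmed sketch of one iteration under those assumptions:

    /* Sketch: one iteration of the packet-split refill loop; the
     * half-page page-reuse logic is elided. */
    rx_desc = I40E_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_bi[i];

    if (!bi->page) {
        bi->page = alloc_page(GFP_ATOMIC);
        if (!bi->page) {
            rx_ring->rx_stats.alloc_page_failed++;
            goto no_buffers;
        }
        bi->page_dma = dma_map_page(rx_ring->dev, bi->page,
                                    0, PAGE_SIZE / 2, DMA_FROM_DEVICE);
        if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
            rx_ring->rx_stats.alloc_page_failed++;
            bi->page_dma = 0;
            goto no_buffers;
        }
    }

    /* header buffer was carved out in i40e_alloc_rx_headers() */
    dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                     0, rx_ring->rx_hdr_len,
                                     DMA_FROM_DEVICE);

    /* hand both addresses to hardware */
    rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
    rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

    if (++i == rx_ring->count)
        i = 0;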
1286 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers_1buf() argument
1288 u16 i = rx_ring->next_to_use; in i40e_alloc_rx_buffers_1buf()
1294 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers_1buf()
1298 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_alloc_rx_buffers_1buf()
1299 bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_buffers_1buf()
1303 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_alloc_rx_buffers_1buf()
1304 rx_ring->rx_buf_len); in i40e_alloc_rx_buffers_1buf()
1306 rx_ring->rx_stats.alloc_buff_failed++; in i40e_alloc_rx_buffers_1buf()
1310 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_alloc_rx_buffers_1buf()
1315 bi->dma = dma_map_single(rx_ring->dev, in i40e_alloc_rx_buffers_1buf()
1317 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers_1buf()
1319 if (dma_mapping_error(rx_ring->dev, bi->dma)) { in i40e_alloc_rx_buffers_1buf()
1320 rx_ring->rx_stats.alloc_buff_failed++; in i40e_alloc_rx_buffers_1buf()
1329 if (i == rx_ring->count) in i40e_alloc_rx_buffers_1buf()
1334 if (rx_ring->next_to_use != i) in i40e_alloc_rx_buffers_1buf()
1335 i40e_release_rx_desc(rx_ring, i); in i40e_alloc_rx_buffers_1buf()
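The single-buffer variant is simpler: one skb of rx_buf_len per descriptor, allocated with netdev_alloc_skb_ip_align() so the IP header lands aligned, tagged with the queue index, and mapped with dma_map_single(); only pkt_addr is written since there is no split header. Trimmed sketch of one iteration:

    /* Sketch: one iteration of the single-buffer refill loop. */
    rx_desc = I40E_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_bi[i];
    skb = bi->skb;

    if (!skb) {
        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                        rx_ring->rx_buf_len);
        if (!skb) {
            rx_ring->rx_stats.alloc_buff_failed++;
            goto no_buffers;
        }
        skb_record_rx_queue(skb, rx_ring->queue_index);
        bi->skb = skb;
    }

    if (!bi->dma) {
        bi->dma = dma_map_single(rx_ring->dev, skb->data,
                                 rx_ring->rx_buf_len,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(rx_ring->dev, bi->dma)) {
            rx_ring->rx_stats.alloc_buff_failed++;
            bi->dma = 0;
            goto no_buffers;
        }
    }

    rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
    rx_desc->read.hdr_addr = 0;        /* no split header */

    if (++i == rx_ring->count)
        i = 0;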
1344 static void i40e_receive_skb(struct i40e_ring *rx_ring, in i40e_receive_skb() argument
1347 struct i40e_q_vector *q_vector = rx_ring->q_vector; in i40e_receive_skb()
1348 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_receive_skb()
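i40e_receive_skb() is the hand-off into the stack: a hardware-stripped VLAN tag is re-attached with __vlan_hwaccel_put_tag(), then the skb goes up through GRO on the ring's q_vector. A condensed sketch (the source also consults vsi->back flags, e.g. for netpoll, via the vsi pointer shown above; that branch is elided):

    /* Sketch: hand a completed skb to the stack via GRO. */
    static void i40e_receive_skb(struct i40e_ring *rx_ring,
                                 struct sk_buff *skb, u16 vlan_tag)
    {
        struct i40e_q_vector *q_vector = rx_ring->q_vector;

        if (vlan_tag & VLAN_VID_MASK)
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

        napi_gro_receive(&q_vector->napi, skb);
    }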
1518 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_ps() argument
1522 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_ps()
1524 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_ps()
1525 u16 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1540 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); in i40e_clean_rx_irq_ps()
1544 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1545 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_ps()
1559 i40e_clean_programming_status(rx_ring, rx_desc); in i40e_clean_rx_irq_ps()
1560 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1563 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1566 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
1567 rx_ring->rx_hdr_len); in i40e_clean_rx_irq_ps()
1569 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq_ps()
1574 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
1576 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_irq_ps()
1579 rx_ring->rx_hdr_len, in i40e_clean_rx_irq_ps()
1635 dma_unmap_page(rx_ring->dev, in i40e_clean_rx_irq_ps()
1641 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1647 next_buffer = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1649 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_ps()
1662 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1668 rx_ring->last_rx_timestamp = jiffies; in i40e_clean_rx_irq_ps()
1675 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1683 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1688 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1689 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
1691 rx_ring->netdev->last_rx = jiffies; in i40e_clean_rx_irq_ps()
1696 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1697 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1698 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
1699 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1700 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1701 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
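The listing above is the packet-split receive loop: refill when cleaned_count reaches the write-back watermark, stop at the first descriptor without the DD bit, divert programming-status descriptors, assemble the header plus page fragments into an skb, chain non-EOP descriptors, then hash/timestamp/VLAN and hand off; the tail updates ring counters under the u64_stats syncp so 32-bit readers see consistent 64-bit values. A condensed skeleton, assuming I40E_RX_BUFFER_WRITE as the refill watermark and a helper-style programming-status test (in the source this may be an inline qword check):

    /* Sketch: skeleton of i40e_clean_rx_irq_ps(); skb assembly and
     * error handling are elided as comments. */
    do {
        if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
            i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
            cleaned_count = 0;
        }

        i = rx_ring->next_to_clean;
        rx_desc = I40E_RX_DESC(rx_ring, i);
        qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                    I40E_RXD_QW1_STATUS_SHIFT;

        if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
            break;                 /* nothing more written back */

        /* read no other descriptor fields until DD is seen */
        dma_rmb();

        if (i40e_rx_is_programming_status(qword)) {
            i40e_clean_programming_status(rx_ring, rx_desc);
            I40E_RX_INCREMENT(rx_ring, i);
            continue;
        }

        rx_bi = &rx_ring->rx_bi[i];
        /* ... copy header, attach page frags, chain non-EOP ... */
        /* ... hash, PTP timestamp, checksum, VLAN tag ... */
        i40e_receive_skb(rx_ring, skb, vlan_tag);
    } while (likely(total_rx_packets < budget));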
1713 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_1buf() argument
1716 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_1buf()
1717 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_1buf()
1731 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); in i40e_clean_rx_irq_1buf()
1735 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_1buf()
1736 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_1buf()
1751 i40e_clean_programming_status(rx_ring, rx_desc); in i40e_clean_rx_irq_1buf()
1752 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1755 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_1buf()
1775 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, in i40e_clean_rx_irq_1buf()
1779 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1783 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_1buf()
1796 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1802 rx_ring->last_rx_timestamp = jiffies; in i40e_clean_rx_irq_1buf()
1809 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1817 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_1buf()
1822 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
1824 rx_ring->netdev->last_rx = jiffies; in i40e_clean_rx_irq_1buf()
1828 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1829 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1830 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
1831 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1832 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1833 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
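The single-buffer loop above mirrors the packet-split one minus the header handling. Both variants are driven from the driver's NAPI poll with a slice of the budget, and a ring that cleans its full slice keeps the vector in polling mode. A hedged sketch of that call site, assuming the i40e_for_each_ring() iterator and the local names budget_per_ring/clean_complete from the driver's poll routine:

    /* Sketch: how the two clean variants plug into NAPI polling;
     * the ps/1buf choice follows ring_is_ps_enabled() as in the
     * refill paths. */
    i40e_for_each_ring(ring, q_vector->rx) {
        int cleaned;

        if (ring_is_ps_enabled(ring))
            cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
        else
            cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

        /* if we cleaned the whole slice, stay in polling mode */
        clean_complete &= (budget_per_ring != cleaned);
    }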