Lines Matching refs:skb

53 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
57 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
71 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
248 dev_kfree_skb_any(tx_buf->skb); in i40e_clean_tx_irq()
257 tx_buf->skb = NULL; in i40e_clean_tx_irq()
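The lines above show the TX cleanup pattern used in both i40e_unmap_and_free_tx_resource() and i40e_clean_tx_irq(): if a buffer still owns an skb, free it with dev_kfree_skb_any() (safe from any context) and clear the pointer so the slot is marked reusable. A minimal sketch of that pattern, assuming a hypothetical my_tx_buffer bookkeeping struct rather than the driver's real one:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	/* hypothetical per-descriptor bookkeeping, not the driver's struct */
	struct my_tx_buffer {
		struct sk_buff *skb;
	};

	static void my_free_tx_buffer(struct my_tx_buffer *tx_buffer)
	{
		if (tx_buffer->skb) {
			/* safe from hard-irq, soft-irq and process context */
			dev_kfree_skb_any(tx_buffer->skb);
			tx_buffer->skb = NULL;	/* mark the slot free */
		}
	}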
562 if (rx_bi->skb) { in i40evf_clean_rx_ring()
563 dev_kfree_skb(rx_bi->skb); in i40evf_clean_rx_ring()
564 rx_bi->skb = NULL; in i40evf_clean_rx_ring()
718 if (bi->skb) /* desc is in use */ in i40evf_alloc_rx_buffers_ps()
774 struct sk_buff *skb; in i40evf_alloc_rx_buffers_1buf() local
783 skb = bi->skb; in i40evf_alloc_rx_buffers_1buf()
785 if (!skb) { in i40evf_alloc_rx_buffers_1buf()
786 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40evf_alloc_rx_buffers_1buf()
788 if (!skb) { in i40evf_alloc_rx_buffers_1buf()
793 skb_record_rx_queue(skb, rx_ring->queue_index); in i40evf_alloc_rx_buffers_1buf()
794 bi->skb = skb; in i40evf_alloc_rx_buffers_1buf()
799 skb->data, in i40evf_alloc_rx_buffers_1buf()
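i40evf_alloc_rx_buffers_1buf() reuses an skb still parked in the buffer info if one exists, otherwise allocates a fresh one with netdev_alloc_skb_ip_align(), records the RX queue, and maps skb->data for the device. A hedged sketch of that refill step; my_rx_buffer, buf_len and the error handling are illustrative, not the driver's names:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/dma-mapping.h>

	struct my_rx_buffer {
		struct sk_buff *skb;
		dma_addr_t dma;
	};

	/* returns 0 on success, -ENOMEM if allocation or mapping failed */
	static int my_refill_rx_buffer(struct net_device *netdev, struct device *dev,
				       struct my_rx_buffer *bi, unsigned int buf_len,
				       u16 queue_index)
	{
		struct sk_buff *skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, buf_len);
			if (!skb)
				return -ENOMEM;
			skb_record_rx_queue(skb, queue_index);
			bi->skb = skb;
		}

		bi->dma = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bi->dma)) {
			dev_kfree_skb(skb);
			bi->skb = NULL;
			return -ENOMEM;
		}
		return 0;
	}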
828 struct sk_buff *skb, u16 vlan_tag) in i40e_receive_skb() argument
835 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); in i40e_receive_skb()
838 netif_rx(skb); in i40e_receive_skb()
840 napi_gro_receive(&q_vector->napi, skb); in i40e_receive_skb()
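i40e_receive_skb() attaches the hardware-stripped VLAN tag with __vlan_hwaccel_put_tag() and then hands the skb to the stack, preferring napi_gro_receive() so GRO can coalesce; netif_rx() appears as the non-NAPI fallback. A minimal sketch of that hand-off, with the "tag present" test reduced to a non-zero TCI:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* vlan_tag == 0 is treated as "no tag" in this illustrative helper */
	static void my_receive_skb(struct napi_struct *napi, struct sk_buff *skb,
				   u16 vlan_tag)
	{
		if (vlan_tag)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		napi_gro_receive(napi, skb);
	}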
852 struct sk_buff *skb, in i40e_rx_checksum() argument
869 skb->ip_summed = CHECKSUM_NONE; in i40e_rx_checksum()
919 skb->transport_header = skb->mac_header + in i40e_rx_checksum()
921 (ip_hdr(skb)->ihl * 4); in i40e_rx_checksum()
924 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || in i40e_rx_checksum()
925 skb->protocol == htons(ETH_P_8021AD)) in i40e_rx_checksum()
928 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && in i40e_rx_checksum()
929 (udp_hdr(skb)->check != 0)) { in i40e_rx_checksum()
930 rx_udp_csum = udp_csum(skb); in i40e_rx_checksum()
931 iph = ip_hdr(skb); in i40e_rx_checksum()
933 (skb->len - in i40e_rx_checksum()
934 skb_transport_offset(skb)), in i40e_rx_checksum()
937 if (udp_hdr(skb)->check != csum) in i40e_rx_checksum()
943 skb->ip_summed = CHECKSUM_UNNECESSARY; in i40e_rx_checksum()
944 skb->csum_level = ipv4_tunnel || ipv6_tunnel; in i40e_rx_checksum()
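i40e_rx_checksum() starts from CHECKSUM_NONE and only sets CHECKSUM_UNNECESSARY once the hardware status bits say the checksum was verified; for tunneled UDP it additionally recomputes the checksum in software with udp_csum()/csum_tcpudp_magic() before trusting it, and csum_level records whether an inner (encapsulated) checksum was validated as well. A hedged sketch of the final decision, with the descriptor status/error bits and the ptype decode collapsed into plain booleans:

	#include <linux/skbuff.h>

	/* "hw_csum_ok" stands in for the descriptor status/error bits the
	 * driver really inspects; "tunneled" for the packet-type decode */
	static void my_rx_checksum(struct sk_buff *skb, bool hw_csum_ok, bool tunneled)
	{
		skb->ip_summed = CHECKSUM_NONE;	/* assume nothing until proven */

		if (!hw_csum_ok)
			return;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* one extra level of checksums was validated for tunnels */
		skb->csum_level = tunneled ? 1 : 0;
	}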
1016 struct sk_buff *skb; in i40e_clean_rx_irq_ps() local
1039 skb = rx_bi->skb; in i40e_clean_rx_irq_ps()
1040 if (likely(!skb)) { in i40e_clean_rx_irq_ps()
1041 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
1043 if (!skb) { in i40e_clean_rx_irq_ps()
1049 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
1072 rx_bi->skb = NULL; in i40e_clean_rx_irq_ps()
1080 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); in i40e_clean_rx_irq_ps()
1081 } else if (skb->len == 0) { in i40e_clean_rx_irq_ps()
1084 len = (rx_packet_len > skb_headlen(skb) ? in i40e_clean_rx_irq_ps()
1085 skb_headlen(skb) : rx_packet_len); in i40e_clean_rx_irq_ps()
1086 memcpy(__skb_put(skb, len), in i40e_clean_rx_irq_ps()
1095 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in i40e_clean_rx_irq_ps()
1100 skb->len += rx_packet_len; in i40e_clean_rx_irq_ps()
1101 skb->data_len += rx_packet_len; in i40e_clean_rx_irq_ps()
1102 skb->truesize += rx_packet_len; in i40e_clean_rx_irq_ps()
1123 next_buffer->skb = skb; in i40e_clean_rx_irq_ps()
1130 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_ps()
1137 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1140 total_rx_bytes += skb->len; in i40e_clean_rx_irq_ps()
1143 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1145 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); in i40e_clean_rx_irq_ps()
1151 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1152 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_ps()
1156 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1157 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
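In the packet-split path above, the header lands in a small buffer that is memcpy'd into the skb head via __skb_put(), while payload pages are attached as fragments with skb_fill_page_desc(); skb->len, data_len and truesize are then grown by hand to account for the page data. A hedged sketch of attaching one payload page the same way (the buffer layout is illustrative):

	#include <linux/skbuff.h>
	#include <linux/mm_types.h>

	/* attach one page of payload to an skb, packet-split style */
	static void my_add_rx_page(struct sk_buff *skb, struct page *page,
				   unsigned int page_offset, unsigned int len)
	{
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   page, page_offset, len);

		/* account for the fragment by hand, as the driver does */
		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
	}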
1195 struct sk_buff *skb; in i40e_clean_rx_irq_1buf() local
1219 skb = rx_bi->skb; in i40e_clean_rx_irq_1buf()
1220 prefetch(skb->data); in i40e_clean_rx_irq_1buf()
1231 rx_bi->skb = NULL; in i40e_clean_rx_irq_1buf()
1237 skb_put(skb, rx_packet_len); in i40e_clean_rx_irq_1buf()
1252 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_1buf()
1259 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1262 total_rx_bytes += skb->len; in i40e_clean_rx_irq_1buf()
1265 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1267 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); in i40e_clean_rx_irq_1buf()
1272 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
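The single-buffer path is simpler: the whole frame is already in the skb head, so the driver just skb_put()s the DMA'd length, records the RSS hash, sets skb->protocol via eth_type_trans() and passes the skb up. A minimal sketch of that completion step; the length, hash value and hash type are placeholders:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	static void my_complete_rx_1buf(struct napi_struct *napi,
					struct net_device *netdev,
					struct sk_buff *skb,
					unsigned int pkt_len, u32 rss_hash)
	{
		skb_put(skb, pkt_len);		/* frame was DMA'd into skb->data */
		skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
		skb->protocol = eth_type_trans(skb, netdev);
		napi_gro_receive(napi, skb);
	}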
1366 static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, in i40e_tx_prepare_vlan_flags() argument
1370 __be16 protocol = skb->protocol; in i40e_tx_prepare_vlan_flags()
1382 skb->protocol = vlan_get_protocol(skb); in i40e_tx_prepare_vlan_flags()
1387 if (skb_vlan_tag_present(skb)) { in i40e_tx_prepare_vlan_flags()
1388 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; in i40e_tx_prepare_vlan_flags()
1393 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); in i40e_tx_prepare_vlan_flags()
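i40e_tx_prepare_vlan_flags() prefers the accelerated tag carried out-of-band by the stack (skb_vlan_tag_present()/skb_vlan_tag_get()); if the tag is instead embedded in the frame, it is read safely with skb_header_pointer(). A hedged sketch of extracting a VLAN TCI either way (the helper name and return convention are illustrative):

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>
	#include <linux/if_ether.h>

	/* returns true and fills *tci if a VLAN tag was found */
	static bool my_get_vlan_tci(struct sk_buff *skb, u16 *tci)
	{
		struct vlan_hdr *vhdr, _vhdr;

		if (skb_vlan_tag_present(skb)) {	/* tag kept out-of-band */
			*tci = skb_vlan_tag_get(skb);
			return true;
		}

		if (skb->protocol != htons(ETH_P_8021Q))
			return false;

		/* tag is inside the frame; copy it out safely */
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return false;
		*tci = ntohs(vhdr->h_vlan_TCI);
		return true;
	}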
1418 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
1429 if (!skb_is_gso(skb)) in i40e_tso()
1432 err = skb_cow_head(skb, 0); in i40e_tso()
1436 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); in i40e_tso()
1437 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); in i40e_tso()
1440 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); in i40e_tso()
1446 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); in i40e_tso()
1452 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); in i40e_tso()
1453 *hdr_len = (skb->encapsulation in i40e_tso()
1454 ? (skb_inner_transport_header(skb) - skb->data) in i40e_tso()
1455 : skb_transport_offset(skb)) + l4len; in i40e_tso()
1459 cd_tso_len = skb->len - *hdr_len; in i40e_tso()
1460 cd_mss = skb_shinfo(skb)->gso_size; in i40e_tso()
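i40e_tso() bails out for non-GSO skbs, calls skb_cow_head() so the headers it is about to touch are private, then derives what the context descriptor needs: the L4 header length from (inner_)tcp_hdrlen(), hdr_len as all headers up to and including TCP, cd_tso_len = skb->len - hdr_len and cd_mss = gso_size. A hedged sketch of that length computation for the non-encapsulated case only; the return convention is illustrative:

	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	/* compute the lengths a TSO context descriptor typically wants;
	 * returns 0 for non-GSO skbs, <0 on error, 1 when outputs are valid */
	static int my_tso_lengths(struct sk_buff *skb, u16 *hdr_len,
				  u32 *tso_len, u16 *mss)
	{
		int err;

		if (!skb_is_gso(skb))
			return 0;

		err = skb_cow_head(skb, 0);	/* make the header area writable */
		if (err < 0)
			return err;

		/* L2+L3 headers up to TCP, plus the TCP header itself */
		*hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		*tso_len = skb->len - *hdr_len;		/* payload to segment */
		*mss = skb_shinfo(skb)->gso_size;	/* per-segment payload */
		return 1;
	}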
1476 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, in i40e_tx_enable_csum() argument
1488 if (skb->encapsulation) { in i40e_tx_enable_csum()
1489 switch (ip_hdr(skb)->protocol) { in i40e_tx_enable_csum()
1496 network_hdr_len = skb_inner_network_header_len(skb); in i40e_tx_enable_csum()
1497 this_ip_hdr = inner_ip_hdr(skb); in i40e_tx_enable_csum()
1498 this_ipv6_hdr = inner_ipv6_hdr(skb); in i40e_tx_enable_csum()
1499 this_tcp_hdrlen = inner_tcp_hdrlen(skb); in i40e_tx_enable_csum()
1505 ip_hdr(skb)->check = 0; in i40e_tx_enable_csum()
1513 ip_hdr(skb)->check = 0; in i40e_tx_enable_csum()
1517 *cd_tunneling |= (skb_network_header_len(skb) >> 2) << in i40e_tx_enable_csum()
1520 ((skb_inner_network_offset(skb) - in i40e_tx_enable_csum()
1521 skb_transport_offset(skb)) >> 1) << in i40e_tx_enable_csum()
1530 network_hdr_len = skb_network_header_len(skb); in i40e_tx_enable_csum()
1531 this_ip_hdr = ip_hdr(skb); in i40e_tx_enable_csum()
1532 this_ipv6_hdr = ipv6_hdr(skb); in i40e_tx_enable_csum()
1533 this_tcp_hdrlen = tcp_hdrlen(skb); in i40e_tx_enable_csum()
1559 *td_offset |= (skb_network_offset(skb) >> 1) << in i40e_tx_enable_csum()
1627 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) in i40e_chk_linearize() argument
1635 num_frags = skb_shinfo(skb)->nr_frags; in i40e_chk_linearize()
1636 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_chk_linearize()
1649 frag = &skb_shinfo(skb)->frags[0]; in i40e_chk_linearize()
1654 if ((size >= skb_shinfo(skb)->gso_size) && in i40e_chk_linearize()
1656 size = (size % skb_shinfo(skb)->gso_size); in i40e_chk_linearize()
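i40e_chk_linearize() walks the fragments of a GSO skb and decides whether the layout would exceed the hardware's per-segment descriptor limit, in which case the caller falls back to skb_linearize(). A deliberately simplified sketch of that "too fragmented, flatten it" decision; MY_MAX_TX_FRAGS and the comparison are illustrative, not the i40e rule:

	#include <linux/skbuff.h>

	#define MY_MAX_TX_FRAGS 8	/* illustrative limit, not i40e's exact rule */

	/* flatten the skb into one linear buffer if it carries too many
	 * fragments for the hardware to chain; returns 0 or -ENOMEM */
	static int my_maybe_linearize(struct sk_buff *skb)
	{
		if (!skb_is_gso(skb))
			return 0;

		if (skb_shinfo(skb)->nr_frags < MY_MAX_TX_FRAGS)
			return 0;

		return skb_linearize(skb);	/* copies all frags into the head */
	}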
1684 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
1688 unsigned int data_len = skb->data_len; in i40e_tx_map()
1689 unsigned int size = skb_headlen(skb); in i40e_tx_map()
1705 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tx_map()
1710 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); in i40e_tx_map()
1712 first->skb = skb; in i40e_tx_map()
1715 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
1720 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in i40e_tx_map()
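i40e_tx_map() stores the skb on the first buffer info (so TX cleanup can free it later), DMA-maps the linear head with dma_map_single() and then walks the page fragments, which a driver typically maps with skb_frag_dma_map(). A hedged sketch of that mapping loop with the descriptor writes and unwind path omitted:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* map the skb head and every fragment for transmit; returns 0 or -ENOMEM.
	 * A real driver also records each mapping so it can unmap on cleanup. */
	static int my_map_for_tx(struct device *dev, struct sk_buff *skb)
	{
		const skb_frag_t *frag;
		dma_addr_t dma;
		unsigned int f;

		dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			if (dma_mapping_error(dev, dma))
				return -ENOMEM;	/* real code unmaps what it mapped */
		}
		return 0;
	}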
1879 static int i40e_xmit_descriptor_count(struct sk_buff *skb, in i40e_xmit_descriptor_count() argument
1891 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in i40e_xmit_descriptor_count()
1892 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); in i40e_xmit_descriptor_count()
1894 count += TXD_USE_COUNT(skb_headlen(skb)); in i40e_xmit_descriptor_count()
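i40e_xmit_descriptor_count() sums, over the linear head and each fragment, how many descriptors that length needs (TXD_USE_COUNT rounds up to the per-descriptor data limit), so ring space can be checked before anything is mapped. A hedged sketch of the same accounting; MY_MAX_DATA_PER_TXD is an illustrative limit, not the i40e value:

	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	#define MY_MAX_DATA_PER_TXD	16384U	/* illustrative per-descriptor limit */
	#define MY_TXD_USE_COUNT(len)	DIV_ROUND_UP(len, MY_MAX_DATA_PER_TXD)

	/* how many TX descriptors this skb will consume */
	static unsigned int my_xmit_descriptor_count(struct sk_buff *skb)
	{
		unsigned int count, f;

		count = MY_TXD_USE_COUNT(skb_headlen(skb));
		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
			count += MY_TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

		return count;
	}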
1909 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, in i40e_xmit_frame_ring() argument
1921 if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
1925 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
1929 protocol = vlan_get_protocol(skb); in i40e_xmit_frame_ring()
1940 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, in i40e_xmit_frame_ring()
1948 if (i40e_chk_linearize(skb, tx_flags)) in i40e_xmit_frame_ring()
1949 if (skb_linearize(skb)) in i40e_xmit_frame_ring()
1952 skb_tx_timestamp(skb); in i40e_xmit_frame_ring()
1958 if (skb->ip_summed == CHECKSUM_PARTIAL) { in i40e_xmit_frame_ring()
1961 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, in i40e_xmit_frame_ring()
1968 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
1976 dev_kfree_skb_any(skb); in i40e_xmit_frame_ring()
1987 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) in i40evf_xmit_frame() argument
1990 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame()
1995 if (unlikely(skb->len < I40E_MIN_TX_LEN)) { in i40evf_xmit_frame()
1996 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) in i40evf_xmit_frame()
1998 skb->len = I40E_MIN_TX_LEN; in i40evf_xmit_frame()
1999 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); in i40evf_xmit_frame()
2002 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()
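i40evf_xmit_frame() pads frames shorter than the hardware minimum before queueing them: skb_pad() extends and zeroes the tailroom (freeing the skb and returning non-zero on failure), and len/tail are then advanced by hand because skb_pad() does not do that itself. A minimal sketch of that guard inside an ndo_start_xmit-shaped function; MY_MIN_TX_LEN is an illustrative minimum, not the I40E_MIN_TX_LEN value:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	#define MY_MIN_TX_LEN 17	/* illustrative hardware minimum frame length */

	static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	{
		if (unlikely(skb->len < MY_MIN_TX_LEN)) {
			/* skb_pad() frees the skb on failure, so report the frame done */
			if (skb_pad(skb, MY_MIN_TX_LEN - skb->len))
				return NETDEV_TX_OK;
			/* skb_pad() zeroes tailroom but does not advance len/tail */
			skb->len = MY_MIN_TX_LEN;
			skb_set_tail_pointer(skb, MY_MIN_TX_LEN);
		}
		/* ... a real ndo_start_xmit would now map and queue the frame ... */
		return NETDEV_TX_OK;
	}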