/linux-4.1.27/include/trace/events/ |
D | net.h |   35 __field( u16, gso_segs )
              56 __entry->gso_segs = skb_shinfo(skb)->gso_segs;
              67 __entry->gso_size, __entry->gso_segs, __entry->gso_type)
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c |  245 total_packets += tx_buf->gso_segs; in i40e_clean_tx_irq()
                  1633 u16 gso_segs; in i40e_chk_linearize() local
                  1636 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_chk_linearize()
                  1644 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > in i40e_chk_linearize()
                  1696 u16 gso_segs; in i40e_tx_map() local
                  1705 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tx_map()
                  1707 gso_segs = 1; in i40e_tx_map()
                  1710 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); in i40e_tx_map()
                  1711 first->gso_segs = gso_segs; in i40e_tx_map()
|
D | i40e_txrx.h | 154 unsigned short gso_segs; member
|
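The i40e_tx_map() hits at 1710-1711 (repeated in the i40e driver below at 2507-2508) carry the TSO byte accounting: the headers (hdr_len bytes) are replicated once per segment, so the bytes that eventually reach the wire are the payload plus one header per segment. A minimal standalone sketch of that arithmetic, with illustrative values rather than driver code:

    #include <stdio.h>

    /* Same arithmetic as the i40e_tx_map() hit above: wire bytes = payload
     * plus one header per segment.  skb_len includes the single header the
     * GSO skb carries. */
    static unsigned int tso_wire_bytes(unsigned int skb_len,
                                       unsigned int hdr_len,
                                       unsigned int gso_segs)
    {
            return skb_len - hdr_len + gso_segs * hdr_len;
    }

    int main(void)
    {
            /* Illustrative numbers: 66-byte Ethernet+IP+TCP header, MSS 1448,
             * 44 full segments packed into one GSO skb. */
            unsigned int hdr_len = 66, gso_segs = 44;
            unsigned int skb_len = hdr_len + gso_segs * 1448;

            printf("%u bytes on the wire\n",
                   tso_wire_bytes(skb_len, hdr_len, gso_segs));
            return 0;
    }
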
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c |  726 total_packets += tx_buf->gso_segs; in i40e_clean_tx_irq()
                  2424 u16 gso_segs; in i40e_chk_linearize() local
                  2427 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_chk_linearize()
                  2435 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > in i40e_chk_linearize()
                  2493 u16 gso_segs; in i40e_tx_map() local
                  2502 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tx_map()
                  2504 gso_segs = 1; in i40e_tx_map()
                  2507 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); in i40e_tx_map()
                  2508 first->gso_segs = gso_segs; in i40e_tx_map()
|
D | i40e_txrx.h | 155 unsigned short gso_segs; member
|
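The i40e_chk_linearize() hits (2424-2435 here, and the i40evf copy above) apply a quick heuristic: spread the fragment count evenly across the segments and linearize the skb if any segment would need more buffers than one descriptor chain supports. The comparison target is truncated in the listing, so the limit below is only a stand-in constant for illustration:

    #include <stdbool.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Placeholder for the hardware limit the real check compares against
     * (cut off after ">" in the listing above). */
    #define MAX_BUFFERS_PER_DESC 8

    static bool needs_linearize(unsigned int num_frags, unsigned int gso_segs)
    {
            /* Average (rounded-up) fragments per TSO segment, as in the
             * DIV_ROUND_UP((num_frags + gso_segs), gso_segs) hit above. */
            return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) >
                   MAX_BUFFERS_PER_DESC;
    }
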
/linux-4.1.27/net/core/ |
D | tso.c | 10 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; in tso_count_descs()
|
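tso_count_descs() condenses the descriptor budget for software TSO into one expression: roughly a header descriptor and a data descriptor per segment, plus one more per page fragment. A sketch of the same estimate:

    /* Descriptor estimate in the shape of tso_count_descs(): header plus
     * data descriptor per segment, plus one per page fragment. */
    static int tso_desc_estimate(unsigned short gso_segs, unsigned int nr_frags)
    {
            return gso_segs * 2 + nr_frags;
    }
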
D | dev.c | 2616 u16 gso_segs = skb_shinfo(skb)->gso_segs; in netif_skb_features() local
            2618 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) in netif_skb_features()
            2788 u16 gso_segs = shinfo->gso_segs; in qdisc_pkt_len_init() local
            2800 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, in qdisc_pkt_len_init()
            2803 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; in qdisc_pkt_len_init()
|
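The dev.c hits show two independent uses: netif_skb_features() drops GSO feature bits when gso_segs falls outside the device's advertised gso_min_segs/gso_max_segs window, and qdisc_pkt_len_init() inflates the qdisc-visible packet length by one header per extra segment so byte-based schedulers account for what will actually hit the wire. A simplified sketch of the latter (the kernel only recomputes gso_segs this way for untrusted skbs; here it is recomputed unconditionally for brevity):

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Simplified qdisc_pkt_len_init(): charge the qdisc for the headers that
     * segmentation will replicate, not just for skb->len. */
    static unsigned int qdisc_pkt_len(unsigned int skb_len,
                                      unsigned int hdr_len,
                                      unsigned int gso_size)
    {
            unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

            return skb_len + (gso_segs - 1) * hdr_len;
    }
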
D | skbuff.c | 1036 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; in copy_skb_header()
|
/linux-4.1.27/net/ipv4/ |
D | tcp_offload.c | 104 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in tcp_gso_segment()
                    282 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in tcp_gro_complete()
|
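tcp_gso_segment() at line 104 is where gso_segs is derived from the MSS: by that point the headers have been pulled, so skb->len is pure payload and the rounded-up division gives the number of on-the-wire packets. udp4_ufo_fragment() and udp6_ufo_fragment() (further down) use the identical expression, while line 282 goes the other way: after GRO merging, gso_segs is set to the number of packets that were coalesced. A one-function sketch of the count:

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Number of wire packets a GSO/UFO skb will become: payload / MSS,
     * rounded up, as in tcp_gso_segment() and the UFO fragment helpers. */
    static unsigned short gso_segment_count(unsigned int payload_len,
                                            unsigned int mss)
    {
            return DIV_ROUND_UP(payload_len, mss);
    }
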
D | ip_tunnel_core.c | 78 skb_shinfo(skb)->gso_segs ?: 1); in iptunnel_xmit()
|
D | ip_input.c | 421 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); in ip_rcv()
|
D | udp_offload.c | 214 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in udp4_ufo_fragment()
|
D | tcp.c | 853 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
            856 tp->gso_segs = min_t(u16, new_size_goal / mss_now, in tcp_xmit_size_goal()
            858 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
|
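In tcp_xmit_size_goal() the socket caches how many MSS-sized segments it is willing to pack into one GSO skb and derives the send-path size goal from that count; the kernel only refreshes the cached value when the goal drifts, and the second argument of the min_t() at line 856 is truncated in the listing. The sketch below recomputes unconditionally and uses max_segs as a stand-in for that truncated cap:

    /* Sketch of the size-goal logic under the assumptions above: cap the
     * per-skb segment count, then the goal is segments * current MSS. */
    static unsigned int xmit_size_goal(unsigned int new_size_goal,
                                       unsigned int mss_now,
                                       unsigned int max_segs)
    {
            unsigned int gso_segs = new_size_goal / mss_now;

            if (gso_segs > max_segs)        /* the truncated min_t() cap */
                    gso_segs = max_segs;

            return gso_segs * mss_now;
    }
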
D | ip_output.c | 441 skb_shinfo(skb)->gso_segs ?: 1); in ip_queue_xmit()
|
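ip_rcv(), ip_queue_xmit() and iptunnel_xmit() all count a GSO skb as gso_segs packets in the IP statistics, falling back to 1 because a non-GSO skb has gso_segs == 0. bstats_update() in include/net/sch_generic.h and the igbvf/e1000/e1000e/benet drivers further down use the same idiom. The whole pattern is:

    /* A GSO skb represents gso_segs wire packets; plain skbs have
     * gso_segs == 0 and still count as one (the kernel's "gso_segs ?: 1"). */
    static unsigned int pkts_for_stats(unsigned short gso_segs)
    {
            return gso_segs ? gso_segs : 1;
    }
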
D | tcp_output.c | 1022 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in tcp_transmit_skb()
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | sunvnet.c | 1154 int gso_size, gso_type, gso_segs; in vnet_handle_offloads() local
                1176 gso_segs = skb_shinfo(skb)->gso_segs; in vnet_handle_offloads()
                1179 gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); in vnet_handle_offloads()
                1181 if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { in vnet_handle_offloads()
                1186 if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) in vnet_handle_offloads()
                1200 skb_shinfo(skb)->gso_segs = gso_segs; in vnet_handle_offloads()
                1218 skb_shinfo(curr)->gso_segs = in vnet_handle_offloads()
|
/linux-4.1.27/net/ipv6/ |
D | udp_offload.c | 53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in udp6_ufo_fragment()
|
D | ip6_input.c | 114 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); in ipv6_rcv()
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |  822 first->gso_segs = skb_shinfo(skb)->gso_segs; in fm10k_tso()
                    823 first->bytecount += (first->gso_segs - 1) * hdrlen; in fm10k_tso()
                   1133 first->gso_segs = 1; in fm10k_xmit_frame_ring()
                   1253 total_packets += tx_buffer->gso_segs; in fm10k_clean_tx_irq()
|
D | fm10k.h | 82 u16 gso_segs; member
|
/linux-4.1.27/include/linux/ |
D | tcp.h | 135 u16 gso_segs; /* Max number of segs per GSO packet */ member
|
D | skbuff.h | 321 unsigned short gso_segs; member
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf.h | 60 unsigned short gso_segs; member
|
D | ixgbevf_main.c |  326 total_packets += tx_buffer->gso_segs; in ixgbevf_clean_tx_irq()
                     3317 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbevf_tso()
                     3318 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbevf_tso()
                     3642 first->gso_segs = 1; in ixgbevf_xmit_frame()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_fcoe.c | 592 first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, in ixgbe_fso()
                   594 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbe_fso()
|
D | ixgbe.h | 180 unsigned short gso_segs; member
|
D | ixgbe_main.c | 1114 total_packets += tx_buffer->gso_segs; in ixgbe_clean_tx_irq()
                   6826 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbe_tso()
                   6827 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbe_tso()
                   7312 first->gso_segs = 1; in ixgbe_xmit_frame_ring()
|
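fm10k, ixgbevf, ixgbe and igb (and the i40e drivers above) all follow the same two-step pattern: the TSO setup path stashes gso_segs and an inflated bytecount in the first Tx buffer of the frame, and the Tx-cleanup interrupt later sums those fields so interface statistics count wire packets and wire bytes rather than GSO super-packets. Note that bytecount += (gso_segs - 1) * hdr_len yields the same total as i40e's skb->len - hdr_len + gso_segs * hdr_len. A compact sketch of both halves, with illustrative (not driver) struct and field names:

    /* Illustrative per-frame bookkeeping, not a driver structure. */
    struct tx_meta {
            unsigned short gso_segs;   /* wire packets for this frame */
            unsigned int   bytecount;  /* wire bytes for this frame   */
    };

    /* TSO setup: one extra header per additional segment. */
    static void tso_account(struct tx_meta *first, unsigned int skb_len,
                            unsigned int hdr_len, unsigned short gso_segs)
    {
            first->gso_segs  = gso_segs;
            first->bytecount = skb_len + (gso_segs - 1) * hdr_len;
    }

    /* Tx cleanup: fold completed frames into the interface counters. */
    static void clean_tx(const struct tx_meta *done, unsigned int n,
                         unsigned long *total_packets,
                         unsigned long *total_bytes)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    *total_packets += done[i].gso_segs;
                    *total_bytes   += done[i].bytecount;
            }
    }
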
/linux-4.1.27/drivers/net/ethernet/tile/ |
D | tilegx.c | 1688 for (segment = 0; segment < sh->gso_segs; segment++) { in tso_count_edescs()
               1762 for (segment = 0; segment < sh->gso_segs; segment++) { in tso_headers_prepare()
               1788 if (segment != sh->gso_segs - 1) { in tso_headers_prepare()
               1861 for (segment = 0; segment < sh->gso_segs; segment++) { in tso_egress()
|
D | tilepro.c | 1711 unsigned int num_segs = sh->gso_segs; in tile_net_tx_tso()
|
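The tilegx and tilepro drivers do TSO in software: they loop over sh->gso_segs segments, emitting a fresh copy of the headers for each one and treating the final segment specially (the segment != sh->gso_segs - 1 check). A stripped-down sketch of that outer loop shape; the callback and names are placeholders, not driver code:

    /* Skeleton of a software-TSO egress loop in the shape of tso_egress():
     * one header plus one payload slice per segment, short last segment. */
    static void sw_tso_emit(unsigned short gso_segs, unsigned int payload_len,
                            unsigned int mss,
                            void (*emit_segment)(unsigned int seg,
                                                 unsigned int seg_payload))
    {
            unsigned int segment;

            for (segment = 0; segment < gso_segs; segment++) {
                    unsigned int seg_payload = mss;

                    if (segment == gso_segs - 1)   /* last, possibly short */
                            seg_payload = payload_len - segment * mss;

                    emit_segment(segment, seg_payload);
            }
    }
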
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb.h | 195 u16 gso_segs; member
|
D | igb_main.c | 4695 first->gso_segs = skb_shinfo(skb)->gso_segs; in igb_tso()
                 4696 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igb_tso()
                 5032 first->gso_segs = 1; in igb_xmit_frame_ring()
                 6439 total_packets += tx_buffer->gso_segs; in igb_clean_tx_irq()
|
/linux-4.1.27/include/net/ |
D | sch_generic.h | 522 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; in bstats_update()
|
/linux-4.1.27/drivers/net/ |
D | macvtap.c | 613 skb_shinfo(skb)->gso_segs = 0; in macvtap_skb_from_vnet_hdr()
|
D | virtio_net.c | 516 skb_shinfo(skb)->gso_segs = 0; in receive_buf()
|
D | xen-netfront.c | 816 skb_shinfo(skb)->gso_segs = 0; in xennet_set_skb_gso()
|
D | tun.c | 1195 skb_shinfo(skb)->gso_segs = 0; in tun_get_user()
|
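macvtap, virtio_net, xen-netfront and tun all build GSO skbs from a header supplied by a guest or by userspace, and they leave gso_segs at 0: the count is not trusted, and the stack derives it later (see the DIV_ROUND_UP() uses in qdisc_pkt_len_init() and tcp_gso_segment() above). A sketch of that convention with an illustrative metadata struct standing in for skb_shared_info:

    /* Illustrative GSO metadata, not the kernel's skb_shared_info. */
    struct gso_meta {
            unsigned short gso_size;
            unsigned short gso_segs;
            unsigned int   gso_type;
    };

    /* When the segment count comes from an untrusted source, record only
     * size and type and leave gso_segs = 0 so the stack computes it itself. */
    static void gso_from_untrusted_hdr(struct gso_meta *m,
                                       unsigned short gso_size,
                                       unsigned int gso_type)
    {
            m->gso_size = gso_size;
            m->gso_type = gso_type;
            m->gso_segs = 0;
    }
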
/linux-4.1.27/drivers/net/xen-netback/ |
D | netback.c | 1496 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c | 1308 txq->tx_cso += ssi->gso_segs; in t4vf_eth_xmit()
|
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/ |
D | xgbe-drv.c | 1274 packet->tx_packets = skb_shinfo(skb)->gso_segs; in xgbe_prep_tso()
|
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c | 829 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c | 1234 q->tx_cso += ssi->gso_segs; in t4_eth_xmit()
|
/linux-4.1.27/net/packet/ |
D | af_packet.c | 2582 skb_shinfo(skb)->gso_segs = 0; in packet_snd()
|
/linux-4.1.27/drivers/net/ethernet/intel/e1000/ |
D | e1000_main.c | 2953 segs = skb_shinfo(skb)->gso_segs ?: 1; in e1000_tx_map()
|
/linux-4.1.27/drivers/net/ethernet/emulex/benet/ |
D | be_main.c | 674 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1); in be_tx_stats_update()
|
/linux-4.1.27/drivers/net/ethernet/intel/e1000e/ |
D | netdev.c | 5380 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
|
/linux-4.1.27/drivers/net/ethernet/broadcom/ |
D | tg3.c | 7841 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; in tg3_tso_bug_gso_check()
            7853 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; in tg3_tso_bug()
|
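tg3's TSO workaround path re-segments the skb in software, so tg3_tso_bug_gso_check() only allows it when the frame's segment count is small relative to the Tx ring size (the companion estimate at line 7853 budgets roughly three descriptors per segment). As a sketch:

    #include <stdbool.h>

    /* Only take the software re-segmentation path when the estimated
     * descriptor usage (about 3 per segment) fits in a third of the ring. */
    static bool tso_bug_can_handle(unsigned short gso_segs,
                                   unsigned int tx_pending)
    {
            return gso_segs < tx_pending / 3;
    }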