Lines matching refs: skb

97 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,  in skb_panic()  argument
101 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
102 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
103 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
107 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
109 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
112 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
114 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
162 struct sk_buff *skb; in __alloc_skb_head() local
165 skb = kmem_cache_alloc_node(skbuff_head_cache, in __alloc_skb_head()
167 if (!skb) in __alloc_skb_head()
175 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb_head()
176 skb->head = NULL; in __alloc_skb_head()
177 skb->truesize = sizeof(struct sk_buff); in __alloc_skb_head()
178 atomic_set(&skb->users, 1); in __alloc_skb_head()
180 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb_head()
182 return skb; in __alloc_skb_head()
207 struct sk_buff *skb; in __alloc_skb() local
218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb()
219 if (!skb) in __alloc_skb()
221 prefetchw(skb); in __alloc_skb()
245 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
247 skb->truesize = SKB_TRUESIZE(size); in __alloc_skb()
248 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
249 atomic_set(&skb->users, 1); in __alloc_skb()
250 skb->head = data; in __alloc_skb()
251 skb->data = data; in __alloc_skb()
252 skb_reset_tail_pointer(skb); in __alloc_skb()
253 skb->end = skb->tail + size; in __alloc_skb()
254 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb()
255 skb->transport_header = (typeof(skb->transport_header))~0U; in __alloc_skb()
258 shinfo = skb_shinfo(skb); in __alloc_skb()
266 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
269 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
276 return skb; in __alloc_skb()
278 kmem_cache_free(cache, skb); in __alloc_skb()
279 skb = NULL; in __alloc_skb()
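
These lines appear to come from the kernel's net/core/skbuff.c (around the Linux 4.0 era). As a hedged illustration of the allocation path above, here is a minimal sketch of how a caller typically uses alloc_skb() (a wrapper around __alloc_skb()) together with skb_reserve() and skb_put(); the helper name, headroom and payload length are made up for the example:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        /* Hypothetical helper: build a linear skb with headroom and payload. */
        static struct sk_buff *example_build_packet(const void *payload, unsigned int len)
        {
                struct sk_buff *skb;

                /* Room for lower-layer headers plus the payload itself. */
                skb = alloc_skb(NET_SKB_PAD + len, GFP_KERNEL);
                if (!skb)
                        return NULL;

                skb_reserve(skb, NET_SKB_PAD);           /* headroom for headers pushed later */
                memcpy(skb_put(skb, len), payload, len); /* advances skb->tail and skb->len */

                return skb;
        }
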
306 struct sk_buff *skb; in __build_skb() local
309 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
310 if (!skb) in __build_skb()
315 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
316 skb->truesize = SKB_TRUESIZE(size); in __build_skb()
317 atomic_set(&skb->users, 1); in __build_skb()
318 skb->head = data; in __build_skb()
319 skb->data = data; in __build_skb()
320 skb_reset_tail_pointer(skb); in __build_skb()
321 skb->end = skb->tail + size; in __build_skb()
322 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb()
323 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb()
326 shinfo = skb_shinfo(skb); in __build_skb()
331 return skb; in __build_skb()
341 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
343 if (skb && frag_size) { in build_skb()
344 skb->head_frag = 1; in build_skb()
346 skb->pfmemalloc = 1; in build_skb()
348 return skb; in build_skb()
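
build_skb() wraps caller-provided memory instead of allocating a data buffer itself. A hedged sketch of the common driver pattern built on netdev_alloc_frag(); the helper name and sizing are illustrative:

        #include <linux/skbuff.h>
        #include <linux/mm.h>

        /* Hypothetical RX buffer setup: wrap a page fragment in an skb head. */
        static struct sk_buff *example_wrap_frag(unsigned int buf_len)
        {
                unsigned int fragsz = SKB_DATA_ALIGN(buf_len) +
                                      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                void *data = netdev_alloc_frag(fragsz);
                struct sk_buff *skb;

                if (!data)
                        return NULL;

                skb = build_skb(data, fragsz);  /* skb->head_frag is set for frag memory */
                if (!skb)
                        put_page(virt_to_head_page(data));

                return skb;
        }
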
487 struct sk_buff *skb = NULL; in __alloc_rx_skb() local
502 skb = build_skb(data, fragsz); in __alloc_rx_skb()
503 if (unlikely(!skb)) in __alloc_rx_skb()
507 skb = __alloc_skb(length, gfp_mask, in __alloc_rx_skb()
510 return skb; in __alloc_rx_skb()
529 struct sk_buff *skb; in __netdev_alloc_skb() local
532 skb = __alloc_rx_skb(length, gfp_mask, 0); in __netdev_alloc_skb()
534 if (likely(skb)) { in __netdev_alloc_skb()
535 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
536 skb->dev = dev; in __netdev_alloc_skb()
539 return skb; in __netdev_alloc_skb()
559 struct sk_buff *skb; in __napi_alloc_skb() local
562 skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI); in __napi_alloc_skb()
564 if (likely(skb)) { in __napi_alloc_skb()
565 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
566 skb->dev = napi->dev; in __napi_alloc_skb()
569 return skb; in __napi_alloc_skb()
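
A hedged sketch of the classic copy-based receive path built on netdev_alloc_skb(), whose internals (__netdev_alloc_skb() reserving NET_SKB_PAD) are shown above; the handler name and frame handling are illustrative:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <linux/etherdevice.h>
        #include <linux/string.h>

        /* Hypothetical RX handler: copy a received frame into a fresh skb. */
        static void example_rx(struct net_device *dev, const void *frame, unsigned int len)
        {
                struct sk_buff *skb;

                skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        return;
                }

                skb_reserve(skb, NET_IP_ALIGN);            /* align the IP header */
                memcpy(skb_put(skb, len), frame, len);
                skb->protocol = eth_type_trans(skb, dev);  /* also sets skb->dev */

                netif_rx(skb);
        }
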
573 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
576 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
577 skb->len += size; in skb_add_rx_frag()
578 skb->data_len += size; in skb_add_rx_frag()
579 skb->truesize += truesize; in skb_add_rx_frag()
583 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
586 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
589 skb->len += size; in skb_coalesce_rx_frag()
590 skb->data_len += size; in skb_coalesce_rx_frag()
591 skb->truesize += truesize; in skb_coalesce_rx_frag()
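
skb_add_rx_frag() attaches page memory as a paged fragment and, as the lines above show, bumps len, data_len and truesize together. A hedged sketch; the page, offset and sizes are assumed to come from the caller:

        #include <linux/skbuff.h>
        #include <linux/mm.h>

        /* Hypothetical: append one received page fragment to an existing skb. */
        static void example_add_frag(struct sk_buff *skb, struct page *page,
                                     int offset, int len, unsigned int truesize)
        {
                get_page(page);   /* the skb now holds its own reference to the page */
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                len, truesize);
        }
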
601 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
603 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
606 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
610 skb_walk_frags(skb, list) in skb_clone_fraglist()
614 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
616 if (skb->head_frag) in skb_free_head()
617 put_page(virt_to_head_page(skb->head)); in skb_free_head()
619 kfree(skb->head); in skb_free_head()
622 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
624 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
627 if (skb->cloned && in skb_release_data()
628 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
650 skb_free_head(skb); in skb_release_data()
656 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
660 switch (skb->fclone) { in kfree_skbmem()
662 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
666 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
677 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
686 static void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
688 skb_dst_drop(skb); in skb_release_head_state()
690 secpath_put(skb->sp); in skb_release_head_state()
692 if (skb->destructor) { in skb_release_head_state()
694 skb->destructor(skb); in skb_release_head_state()
697 nf_conntrack_put(skb->nfct); in skb_release_head_state()
700 nf_bridge_put(skb->nf_bridge); in skb_release_head_state()
705 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
707 skb_release_head_state(skb); in skb_release_all()
708 if (likely(skb->head)) in skb_release_all()
709 skb_release_data(skb); in skb_release_all()
721 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
723 skb_release_all(skb); in __kfree_skb()
724 kfree_skbmem(skb); in __kfree_skb()
735 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
737 if (unlikely(!skb)) in kfree_skb()
739 if (likely(atomic_read(&skb->users) == 1)) in kfree_skb()
741 else if (likely(!atomic_dec_and_test(&skb->users))) in kfree_skb()
743 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
744 __kfree_skb(skb); in kfree_skb()
766 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
768 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { in skb_tx_error()
771 uarg = skb_shinfo(skb)->destructor_arg; in skb_tx_error()
774 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_tx_error()
787 void consume_skb(struct sk_buff *skb) in consume_skb() argument
789 if (unlikely(!skb)) in consume_skb()
791 if (likely(atomic_read(&skb->users) == 1)) in consume_skb()
793 else if (likely(!atomic_dec_and_test(&skb->users))) in consume_skb()
795 trace_consume_skb(skb); in consume_skb()
796 __kfree_skb(skb); in consume_skb()
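
kfree_skb() and consume_skb() differ only in the tracepoint they fire: kfree_skb() marks a drop, consume_skb() a normal completion, which matters for drop-monitoring tools. A hedged sketch with a hypothetical completion helper:

        #include <linux/skbuff.h>

        /* Hypothetical TX completion vs. error handling. */
        static void example_finish(struct sk_buff *skb, bool transmitted_ok)
        {
                if (transmitted_ok)
                        consume_skb(skb);  /* normal path: trace_consume_skb() */
                else
                        kfree_skb(skb);    /* drop path: trace_kfree_skb() */
        }
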
864 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
866 #define C(x) n->x = skb->x in __skb_clone()
870 __copy_skb_header(n, skb); in __skb_clone()
875 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
887 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
888 skb->cloned = 1; in __skb_clone()
926 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
929 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
931 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; in skb_copy_ubufs()
935 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
956 skb_frag_unref(skb, i); in skb_copy_ubufs()
962 __skb_fill_page_desc(skb, i, head, 0, in skb_copy_ubufs()
963 skb_shinfo(skb)->frags[i].size); in skb_copy_ubufs()
967 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_copy_ubufs()
986 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
988 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
993 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
996 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1001 if (skb_pfmemalloc(skb)) in skb_clone()
1012 return __skb_clone(n, skb); in skb_clone()
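
skb_clone() duplicates only the sk_buff header and shares the data area (bumping shinfo->dataref via __skb_clone()), whereas skb_copy() duplicates the data as well. A hedged sketch of the usual "clone for a second consumer, copy before modifying" pattern; the helper and the monitoring consumer are hypothetical:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>

        /* Hypothetical tap: hand a shared duplicate to a monitoring path, then
         * take a private, writable copy for further modification. */
        static struct sk_buff *example_dup(struct sk_buff *skb)
        {
                struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* shares data, bumps dataref */

                if (clone)
                        netif_rx(clone);          /* hypothetical second consumer */

                return skb_copy(skb, GFP_ATOMIC); /* private head + data, writable */
        }
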
1016 static void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1019 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1020 skb->csum_start += off; in skb_headers_offset_update()
1022 skb->transport_header += off; in skb_headers_offset_update()
1023 skb->network_header += off; in skb_headers_offset_update()
1024 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1025 skb->mac_header += off; in skb_headers_offset_update()
1026 skb->inner_transport_header += off; in skb_headers_offset_update()
1027 skb->inner_network_header += off; in skb_headers_offset_update()
1028 skb->inner_mac_header += off; in skb_headers_offset_update()
1040 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1042 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1064 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1066 int headerlen = skb_headroom(skb); in skb_copy()
1067 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1069 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1077 skb_put(n, skb->len); in skb_copy()
1079 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) in skb_copy()
1082 copy_skb_header(n, skb); in skb_copy()
1104 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1107 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1108 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1117 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1119 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1121 n->truesize += skb->data_len; in __pskb_copy_fclone()
1122 n->data_len = skb->data_len; in __pskb_copy_fclone()
1123 n->len = skb->len; in __pskb_copy_fclone()
1125 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1128 if (skb_orphan_frags(skb, gfp_mask)) { in __pskb_copy_fclone()
1133 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1134 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1135 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1140 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1141 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1145 copy_skb_header(n, skb); in __pskb_copy_fclone()
1167 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1172 int size = nhead + skb_end_offset(skb) + ntail; in pskb_expand_head()
1177 if (skb_shared(skb)) in pskb_expand_head()
1182 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1193 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1196 skb_shinfo(skb), in pskb_expand_head()
1197 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1204 if (skb_cloned(skb)) { in pskb_expand_head()
1206 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1208 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1209 skb_frag_ref(skb, i); in pskb_expand_head()
1211 if (skb_has_frag_list(skb)) in pskb_expand_head()
1212 skb_clone_fraglist(skb); in pskb_expand_head()
1214 skb_release_data(skb); in pskb_expand_head()
1216 skb_free_head(skb); in pskb_expand_head()
1218 off = (data + nhead) - skb->head; in pskb_expand_head()
1220 skb->head = data; in pskb_expand_head()
1221 skb->head_frag = 0; in pskb_expand_head()
1222 skb->data += off; in pskb_expand_head()
1224 skb->end = size; in pskb_expand_head()
1227 skb->end = skb->head + size; in pskb_expand_head()
1229 skb->tail += off; in pskb_expand_head()
1230 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1231 skb->cloned = 0; in pskb_expand_head()
1232 skb->hdr_len = 0; in pskb_expand_head()
1233 skb->nohdr = 0; in pskb_expand_head()
1234 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
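
pskb_expand_head() reallocates the head so more headroom or tailroom is available and un-shares a cloned data area; callers that only need headroom usually go through skb_cow_head(), which calls it when necessary. A hedged sketch of pushing an encapsulation header; the 8-byte header size is illustrative:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        #define EXAMPLE_HDR_LEN 8  /* hypothetical encapsulation header size */

        static int example_encap(struct sk_buff *skb)
        {
                int err;

                /* Ensure writable headroom; may call pskb_expand_head() internally. */
                err = skb_cow_head(skb, EXAMPLE_HDR_LEN);
                if (err)
                        return err;

                memset(skb_push(skb, EXAMPLE_HDR_LEN), 0, EXAMPLE_HDR_LEN);
                return 0;
        }
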
1246 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1249 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1252 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1254 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1283 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1290 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1291 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1293 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1302 skb_put(n, skb->len); in skb_copy_expand()
1312 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1313 skb->len + head_copy_len)) in skb_copy_expand()
1316 copy_skb_header(n, skb); in skb_copy_expand()
1336 int skb_pad(struct sk_buff *skb, int pad) in skb_pad() argument
1342 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in skb_pad()
1343 memset(skb->data+skb->len, 0, pad); in skb_pad()
1347 ntail = skb->data_len + pad - (skb->end - skb->tail); in skb_pad()
1348 if (likely(skb_cloned(skb) || ntail > 0)) { in skb_pad()
1349 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in skb_pad()
1357 err = skb_linearize(skb); in skb_pad()
1361 memset(skb->data + skb->len, 0, pad); in skb_pad()
1365 kfree_skb(skb); in skb_pad()
1383 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
1385 if (tail != skb) { in pskb_put()
1386 skb->data_len += len; in pskb_put()
1387 skb->len += len; in pskb_put()
1402 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1404 unsigned char *tmp = skb_tail_pointer(skb); in skb_put()
1405 SKB_LINEAR_ASSERT(skb); in skb_put()
1406 skb->tail += len; in skb_put()
1407 skb->len += len; in skb_put()
1408 if (unlikely(skb->tail > skb->end)) in skb_put()
1409 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
1423 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
1425 skb->data -= len; in skb_push()
1426 skb->len += len; in skb_push()
1427 if (unlikely(skb->data<skb->head)) in skb_push()
1428 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
1429 return skb->data; in skb_push()
1443 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
1445 return skb_pull_inline(skb, len); in skb_pull()
1458 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
1460 if (skb->len > len) in skb_trim()
1461 __skb_trim(skb, len); in skb_trim()
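
skb_put(), skb_push(), skb_pull() and skb_trim() only move skb->data/skb->tail and adjust skb->len, with the over/under panic helpers above catching overruns. A hedged walk-through on a freshly allocated linear skb; all sizes are illustrative:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        static void example_pointer_ops(void)
        {
                struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);

                if (!skb)
                        return;

                skb_reserve(skb, 32);               /* headroom: data = tail = head + 32 */
                memset(skb_put(skb, 64), 0xab, 64); /* tail += 64, len = 64 */
                memset(skb_push(skb, 16), 0, 16);   /* data -= 16, len = 80 */
                skb_pull(skb, 16);                  /* data += 16, len = 64 */
                skb_trim(skb, 48);                  /* drop 16 tail bytes, len = 48 */

                kfree_skb(skb);
        }
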
1468 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
1472 int offset = skb_headlen(skb); in ___pskb_trim()
1473 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
1477 if (skb_cloned(skb) && in ___pskb_trim()
1478 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
1486 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1493 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1496 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
1499 skb_frag_unref(skb, i); in ___pskb_trim()
1501 if (skb_has_frag_list(skb)) in ___pskb_trim()
1502 skb_drop_fraglist(skb); in ___pskb_trim()
1506 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
1538 if (len > skb_headlen(skb)) { in ___pskb_trim()
1539 skb->data_len -= skb->len - len; in ___pskb_trim()
1540 skb->len = len; in ___pskb_trim()
1542 skb->len = len; in ___pskb_trim()
1543 skb->data_len = 0; in ___pskb_trim()
1544 skb_set_tail_pointer(skb, len); in ___pskb_trim()
1576 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
1582 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
1584 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
1585 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
1590 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) in __pskb_pull_tail()
1596 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
1601 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1602 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1617 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
1653 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
1654 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
1660 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
1668 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1669 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1672 skb_frag_unref(skb, i); in __pskb_pull_tail()
1675 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
1677 skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_pull_tail()
1678 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); in __pskb_pull_tail()
1684 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
1686 skb->tail += delta; in __pskb_pull_tail()
1687 skb->data_len -= delta; in __pskb_pull_tail()
1689 return skb_tail_pointer(skb); in __pskb_pull_tail()
1708 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
1710 int start = skb_headlen(skb); in skb_copy_bits()
1714 if (offset > (int)skb->len - len) in skb_copy_bits()
1721 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
1728 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
1730 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
1755 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
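
skb_copy_bits() walks the linear area, the paged frags and the frag list, so it works on non-linear skbs where direct skb->data access would not. A hedged sketch of extracting a fixed-size header into a caller-provided buffer; the 20-byte length is illustrative:

        #include <linux/skbuff.h>
        #include <linux/errno.h>

        /* Hypothetical: peek the first 20 bytes of a possibly non-linear skb. */
        static int example_peek_header(const struct sk_buff *skb, void *buf)
        {
                if (skb->len < 20)
                        return -EINVAL;

                /* offset 0 = start of skb->data; copies across frags if needed */
                return skb_copy_bits(skb, 0, buf, 20);
        }
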
1889 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
1900 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
1901 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
1902 skb_headlen(skb), in __skb_splice_bits()
1904 skb_head_is_locked(skb), in __skb_splice_bits()
1911 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
1912 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
1929 int skb_splice_bits(struct sk_buff *skb, unsigned int offset, in skb_splice_bits() argument
1944 struct sock *sk = skb->sk; in skb_splice_bits()
1951 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) in skb_splice_bits()
1959 skb_walk_frags(skb, frag_iter) { in skb_splice_bits()
1997 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
1999 int start = skb_headlen(skb); in skb_store_bits()
2003 if (offset > (int)skb->len - len) in skb_store_bits()
2009 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2017 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2042 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2070 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2073 int start = skb_headlen(skb); in __skb_checksum()
2082 csum = ops->update(skb->data + offset, copy, csum); in __skb_checksum()
2089 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
2091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2115 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2141 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2149 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
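
__skb_checksum()/skb_checksum() fold the linear data, the frags and the frag list into one running __wsum. A hedged sketch that checksums everything after the transport header and folds it to 16 bits, assuming the transport header has been set; real L4 checksums also need the pseudo-header, as skb_checksum_setup_ipv4() further down illustrates:

        #include <linux/skbuff.h>
        #include <net/checksum.h>

        static __sum16 example_payload_csum(const struct sk_buff *skb)
        {
                int offset = skb_transport_offset(skb);
                __wsum csum;

                csum = skb_checksum(skb, offset, skb->len - offset, 0);
                return csum_fold(csum);  /* one's-complement fold to 16 bits */
        }
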
2155 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2158 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2167 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2176 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2181 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2185 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2205 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2330 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
2335 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
2336 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
2338 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
2340 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
2342 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
2345 if (csstart != skb->len) in skb_copy_and_csum_dev()
2346 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
2347 skb->len - csstart, 0); in skb_copy_and_csum_dev()
2349 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
2350 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
2408 struct sk_buff *skb; in skb_queue_purge() local
2409 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
2410 kfree_skb(skb); in skb_queue_purge()
2466 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
2471 __skb_unlink(skb, list); in skb_unlink()
2518 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
2524 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
2527 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
2528 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
2530 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
2531 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
2532 skb1->data_len = skb->data_len; in skb_split_inside_header()
2534 skb->data_len = 0; in skb_split_inside_header()
2535 skb->len = len; in skb_split_inside_header()
2536 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
2539 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
2544 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
2546 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
2547 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
2548 skb->len = len; in skb_split_no_header()
2549 skb->data_len = len - pos; in skb_split_no_header()
2552 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
2555 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
2566 skb_frag_ref(skb, i); in skb_split_no_header()
2569 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
2570 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2574 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2586 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
2588 int pos = skb_headlen(skb); in skb_split()
2590 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; in skb_split()
2592 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
2594 skb_split_no_header(skb, skb1, len, pos); in skb_split()
2602 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
2604 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_prepare_for_shift()
2625 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
2630 BUG_ON(shiftlen > skb->len); in skb_shift()
2631 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ in skb_shift()
2636 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2650 if (skb_prepare_for_shift(skb) || in skb_shift()
2655 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2669 if ((shiftlen == skb->len) && in skb_shift()
2670 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
2673 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
2676 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
2680 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2708 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
2717 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
2718 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
2719 skb_shinfo(skb)->nr_frags = to; in skb_shift()
2721 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
2728 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
2731 skb->len -= shiftlen; in skb_shift()
2732 skb->data_len -= shiftlen; in skb_shift()
2733 skb->truesize -= shiftlen; in skb_shift()
2751 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
2756 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
2894 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
2903 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
2921 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, in skb_append_datato_frags() argument
2923 int len, int odd, struct sk_buff *skb), in skb_append_datato_frags() argument
2926 int frg_cnt = skb_shinfo(skb)->nr_frags; in skb_append_datato_frags()
2944 offset, copy, 0, skb); in skb_append_datato_frags()
2949 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, in skb_append_datato_frags()
2955 skb->truesize += copy; in skb_append_datato_frags()
2957 skb->len += copy; in skb_append_datato_frags()
2958 skb->data_len += copy; in skb_append_datato_frags()
2979 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
2981 unsigned char *data = skb->data; in skb_pull_rcsum()
2983 BUG_ON(len > skb->len); in skb_pull_rcsum()
2984 __skb_pull(skb, len); in skb_pull_rcsum()
2985 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
2986 return skb->data; in skb_pull_rcsum()
3225 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) in skb_gro_receive() argument
3227 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
3228 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
3229 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
3230 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
3263 delta_truesize = skb->truesize - in skb_gro_receive()
3264 SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
3266 skb->truesize -= skb->data_len; in skb_gro_receive()
3267 skb->len -= skb->data_len; in skb_gro_receive()
3268 skb->data_len = 0; in skb_gro_receive()
3270 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
3272 } else if (skb->head_frag) { in skb_gro_receive()
3275 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
3282 first_offset = skb->data - in skb_gro_receive()
3295 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
3296 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
3301 delta_truesize = skb->truesize; in skb_gro_receive()
3307 skb->data_len -= eat; in skb_gro_receive()
3308 skb->len -= eat; in skb_gro_receive()
3312 __skb_pull(skb, offset); in skb_gro_receive()
3315 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
3317 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
3318 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
3319 __skb_header_release(skb); in skb_gro_receive()
3332 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
3361 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in __skb_to_sgvec() argument
3363 int start = skb_headlen(skb); in __skb_to_sgvec()
3371 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
3378 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
3383 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
3385 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
3399 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
3439 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
3442 return __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec_nomark()
3446 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
3448 int nsg = __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec()
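
skb_to_sgvec() maps an skb's linear data and fragments into a scatterlist, which is how IPsec/crypto code feeds packet data to the crypto API. A hedged sketch; MAX_SKB_FRAGS + 1 entries is the conventional upper bound for a single skb without a frag list, and the helper name is hypothetical:

        #include <linux/skbuff.h>
        #include <linux/scatterlist.h>
        #include <linux/errno.h>

        static int example_map_skb(struct sk_buff *skb)
        {
                struct scatterlist sg[MAX_SKB_FRAGS + 1];
                int nents;

                sg_init_table(sg, MAX_SKB_FRAGS + 1);
                nents = skb_to_sgvec(skb, sg, 0, skb->len); /* entries actually used */
                if (nents <= 0)
                        return -EINVAL;

                /* sg[0..nents-1] can now be handed to the crypto/DMA layer */
                return nents;
        }
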
3473 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
3483 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
3484 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) in skb_cow_data()
3488 if (!skb_has_frag_list(skb)) { in skb_cow_data()
3494 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
3495 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
3499 *trailer = skb; in skb_cow_data()
3506 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
3566 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
3568 struct sock *sk = skb->sk; in sock_rmem_free()
3570 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
3576 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
3578 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
3582 skb_orphan(skb); in sock_queue_err_skb()
3583 skb->sk = sk; in sock_queue_err_skb()
3584 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
3585 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
3588 skb_dst_force(skb); in sock_queue_err_skb()
3590 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
3600 struct sk_buff *skb, *skb_next; in sock_dequeue_err_skb() local
3605 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
3606 if (skb && (skb_next = skb_peek(q))) in sock_dequeue_err_skb()
3614 return skb; in sock_dequeue_err_skb()
3631 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
3633 struct sock *sk = skb->sk; in skb_clone_sk()
3639 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
3652 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
3659 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
3665 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
3671 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
3674 kfree_skb(skb); in __skb_complete_tx_timestamp()
3691 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
3694 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
3702 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
3703 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); in skb_complete_tx_timestamp()
3713 struct sk_buff *skb; in __skb_tstamp_tx() local
3724 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
3726 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
3727 if (!skb) in __skb_tstamp_tx()
3731 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; in __skb_tstamp_tx()
3732 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
3736 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
3738 skb->tstamp = ktime_get_real(); in __skb_tstamp_tx()
3740 __skb_complete_tx_timestamp(skb, sk, tstype); in __skb_tstamp_tx()
3752 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
3754 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
3758 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
3759 skb->wifi_acked = acked; in skb_complete_wifi_ack()
3761 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
3769 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
3771 kfree_skb(skb); in skb_complete_wifi_ack()
3789 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
3791 if (unlikely(start > skb_headlen(skb)) || in skb_partial_csum_set()
3792 unlikely((int)start + off > skb_headlen(skb) - 2)) { in skb_partial_csum_set()
3794 start, off, skb_headlen(skb)); in skb_partial_csum_set()
3797 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
3798 skb->csum_start = skb_headroom(skb) + start; in skb_partial_csum_set()
3799 skb->csum_offset = off; in skb_partial_csum_set()
3800 skb_set_transport_header(skb, start); in skb_partial_csum_set()
3805 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
3808 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
3814 if (max > skb->len) in skb_maybe_pull_tail()
3815 max = skb->len; in skb_maybe_pull_tail()
3817 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
3820 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
3828 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
3836 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
3838 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
3842 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
3845 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
3847 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
3851 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
3862 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
3871 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
3877 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) in skb_checksum_setup_ipv4()
3880 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
3887 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
3892 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
3893 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
3894 skb->len - off, in skb_checksum_setup_ipv4()
3895 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
3907 #define OPT_HDR(type, skb, off) \ argument
3908 (type *)(skb_network_header(skb) + (off))
3910 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
3925 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
3929 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
3931 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
3939 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3946 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
3954 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3961 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
3969 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3976 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
3996 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
4001 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
4002 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
4003 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
4015 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
4019 switch (skb->protocol) { in skb_checksum_setup()
4021 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
4025 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
4037 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
4040 skb->dev->name); in __skb_warn_lro_forwarding()
4044 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
4047 skb_release_head_state(skb); in kfree_skb_partial()
4048 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
4050 __kfree_skb(skb); in kfree_skb_partial()
4147 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
4149 skb->tstamp.tv64 = 0; in skb_scrub_packet()
4150 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
4151 skb->skb_iif = 0; in skb_scrub_packet()
4152 skb->ignore_df = 0; in skb_scrub_packet()
4153 skb_dst_drop(skb); in skb_scrub_packet()
4154 skb_sender_cpu_clear(skb); in skb_scrub_packet()
4155 secpath_reset(skb); in skb_scrub_packet()
4156 nf_reset(skb); in skb_scrub_packet()
4157 nf_reset_trace(skb); in skb_scrub_packet()
4162 skb_orphan(skb); in skb_scrub_packet()
4163 skb->mark = 0; in skb_scrub_packet()
4177 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
4179 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
4182 if (skb->encapsulation) { in skb_gso_transport_seglen()
4183 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
4184 skb_transport_header(skb); in skb_gso_transport_seglen()
4187 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
4189 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
4199 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
4201 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
4202 kfree_skb(skb); in skb_reorder_vlan_header()
4206 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, in skb_reorder_vlan_header()
4208 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
4209 return skb; in skb_reorder_vlan_header()
4212 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
4217 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
4219 return skb; in skb_vlan_untag()
4222 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
4223 if (unlikely(!skb)) in skb_vlan_untag()
4226 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) in skb_vlan_untag()
4229 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
4231 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
4233 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
4234 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
4236 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
4237 if (unlikely(!skb)) in skb_vlan_untag()
4240 skb_reset_network_header(skb); in skb_vlan_untag()
4241 skb_reset_transport_header(skb); in skb_vlan_untag()
4242 skb_reset_mac_len(skb); in skb_vlan_untag()
4244 return skb; in skb_vlan_untag()
4247 kfree_skb(skb); in skb_vlan_untag()
4252 int skb_ensure_writable(struct sk_buff *skb, int write_len) in skb_ensure_writable() argument
4254 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
4257 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
4260 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
4265 static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
4268 unsigned int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
4271 __skb_push(skb, offset); in __skb_vlan_pop()
4272 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
4276 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
4278 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
4281 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
4282 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
4284 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
4285 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
4287 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
4288 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
4290 skb_reset_mac_len(skb); in __skb_vlan_pop()
4292 __skb_pull(skb, offset); in __skb_vlan_pop()
4297 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
4303 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
4304 skb->vlan_tci = 0; in skb_vlan_pop()
4306 if (unlikely((skb->protocol != htons(ETH_P_8021Q) && in skb_vlan_pop()
4307 skb->protocol != htons(ETH_P_8021AD)) || in skb_vlan_pop()
4308 skb->len < VLAN_ETH_HLEN)) in skb_vlan_pop()
4311 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
4316 if (likely((skb->protocol != htons(ETH_P_8021Q) && in skb_vlan_pop()
4317 skb->protocol != htons(ETH_P_8021AD)) || in skb_vlan_pop()
4318 skb->len < VLAN_ETH_HLEN)) in skb_vlan_pop()
4321 vlan_proto = skb->protocol; in skb_vlan_pop()
4322 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
4326 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
4331 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
4333 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
4334 unsigned int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
4341 __skb_push(skb, offset); in skb_vlan_push()
4342 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
4343 skb_vlan_tag_get(skb)); in skb_vlan_push()
4346 skb->protocol = skb->vlan_proto; in skb_vlan_push()
4347 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
4348 __skb_pull(skb, offset); in skb_vlan_push()
4350 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_vlan_push()
4351 skb->csum = csum_add(skb->csum, csum_partial(skb->data in skb_vlan_push()
4354 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
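
skb_vlan_pop() and skb_vlan_push() implement Open vSwitch-style tag actions: they prefer the accelerated vlan_tci field and fall back to rewriting the packet, as the lines above show. A hedged sketch of re-tagging a frame; the VID value is illustrative:

        #include <linux/skbuff.h>
        #include <linux/if_vlan.h>

        /* Hypothetical action: strip any outer VLAN tag, then push 802.1Q VID 100. */
        static int example_retag(struct sk_buff *skb)
        {
                int err;

                err = skb_vlan_pop(skb);
                if (err)
                        return err;

                return skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
        }
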
4378 struct sk_buff *skb; in alloc_skb_with_frags() local
4395 skb = alloc_skb(header_len, gfp_head); in alloc_skb_with_frags()
4396 if (!skb) in alloc_skb_with_frags()
4399 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
4425 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
4429 return skb; in alloc_skb_with_frags()
4432 kfree_skb(skb); in alloc_skb_with_frags()