Lines Matching refs:sk (all hits below are from net/ipv4/ip_output.c)

94 int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)  in __ip_local_out_sk()  argument
100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL, in __ip_local_out_sk()
106 return __ip_local_out_sk(skb->sk, skb); in __ip_local_out()
109 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) in ip_local_out_sk() argument
115 err = dst_output_sk(sk, skb); in ip_local_out_sk()
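
The __ip_local_out_sk()/ip_local_out_sk() hits above come from a kernel generation (roughly the 4.1/4.2 timeframe) in which the IPv4 output path still threads an explicit struct sock * through the *_sk helpers, the netfilter hooks and dst_output_sk(); later kernels replaced these with net-based signatures. __ip_local_out_sk() finalizes the header and runs NF_INET_LOCAL_OUT, and ip_local_out_sk() then hands the packet to dst_output_sk(). As a hedged illustration of how an encapsulation-style caller might use this entry point, consider the sketch below; example_xmit_encap and its parameter plumbing are illustrative, not code from the tree.

/* Hedged sketch (not from ip_output.c): build an outer IPv4 header and
 * hand the packet to ip_local_out_sk(). tot_len and the header checksum
 * are filled in by __ip_local_out_sk() before the LOCAL_OUT hook runs.
 */
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/route.h>

static int example_xmit_encap(struct sock *sk, struct sk_buff *skb,
			      struct rtable *rt, __be32 saddr, __be32 daddr,
			      u8 proto, u8 ttl, u8 tos)
{
	struct iphdr *iph;

	skb_dst_set(skb, &rt->dst);	/* route chosen by the caller */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->tos      = tos;
	iph->frag_off = htons(IP_DF);	/* sketch: never fragment the outer header */
	iph->ttl      = ttl;
	iph->protocol = proto;
	iph->saddr    = saddr;
	iph->daddr    = daddr;
	ip_select_ident(dev_net(rt->dst.dev), skb, sk);

	return ip_local_out_sk(sk, skb);
}
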
134 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, in ip_build_and_send_pkt() argument
137 struct inet_sock *inet = inet_sk(sk); in ip_build_and_send_pkt()
148 if (ip_dont_fragment(sk, &rt->dst)) in ip_build_and_send_pkt()
155 iph->protocol = sk->sk_protocol; in ip_build_and_send_pkt()
156 ip_select_ident(sock_net(sk), skb, sk); in ip_build_and_send_pkt()
163 skb->priority = sk->sk_priority; in ip_build_and_send_pkt()
164 skb->mark = sk->sk_mark; in ip_build_and_send_pkt()
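
ip_build_and_send_pkt() wraps an skb that already carries its transport header in a fresh IPv4 header (including any IP options) and pushes it out through ip_local_out(); connection-oriented transports use it to answer on a request socket. Below is a hedged sketch of that call shape, loosely modelled on how TCP sends a SYN-ACK in this kernel generation; the wrapper itself is illustrative.

/* Hedged sketch: replying on a request socket. Only the
 * ip_build_and_send_pkt() call mirrors the real TCP usage; the wrapper
 * name and argument plumbing are illustrative.
 */
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/request_sock.h>

static int example_send_synack(struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct inet_request_sock *ireq = inet_rsk(req);

	/* skb already holds the TCP header; the IPv4 header, options and
	 * IP ID are filled in by ip_build_and_send_pkt(), which ends in
	 * ip_local_out().
	 */
	return ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
				     ireq->ir_rmt_addr, ireq->opt);
}
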
171 static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) in ip_finish_output2() argument
194 if (skb->sk) in ip_finish_output2()
195 skb_set_owner_w(skb2, skb->sk); in ip_finish_output2()
219 static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) in ip_finish_output_gso() argument
228 return ip_finish_output2(sk, skb); in ip_finish_output_gso()
252 err = ip_fragment(sk, segs, ip_finish_output2); in ip_finish_output_gso()
262 static int ip_finish_output(struct sock *sk, struct sk_buff *skb) in ip_finish_output() argument
268 return dst_output_sk(sk, skb); in ip_finish_output()
272 return ip_finish_output_gso(sk, skb); in ip_finish_output()
275 return ip_fragment(sk, skb, ip_finish_output2); in ip_finish_output()
277 return ip_finish_output2(sk, skb); in ip_finish_output()
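
The ip_finish_output() hits above are the whole post-POSTROUTING decision: re-route if an XFRM policy appeared after SNAT, segment GSO traffic, fragment oversized non-GSO packets, and hand everything else to ip_finish_output2() for neighbour output. A hedged paraphrase reconstructed from those hits follows; it is not a verbatim copy of the source and details such as the exact #ifdef guards may differ in your tree.

/* Hedged paraphrase of ip_finish_output(), reconstructed from the hits
 * above.
 */
static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* a policy lookup after SNAT yielded a new policy: re-route */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output_sk(sk, skb);
	}
#endif
	if (skb_is_gso(skb))
		return ip_finish_output_gso(sk, skb);

	if (skb->len > ip_skb_dst_mtu(skb))
		return ip_fragment(sk, skb, ip_finish_output2);

	return ip_finish_output2(sk, skb);
}
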
280 int ip_mc_output(struct sock *sk, struct sk_buff *skb) in ip_mc_output() argument
298 if (sk_mc_loop(sk) in ip_mc_output()
316 sk, newskb, NULL, newskb->dev, in ip_mc_output()
331 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb, in ip_mc_output()
335 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL, in ip_mc_output()
340 int ip_output(struct sock *sk, struct sk_buff *skb) in ip_output() argument
349 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, in ip_output()
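
ip_output() and ip_mc_output() are not called directly by the transports; they are installed as the route's output callback and reached through dst_output_sk() (for example from ip_local_out_sk() at line 115 above). In this kernel generation that dispatcher is a thin inline in include/net/dst.h, paraphrased from memory below; treat it as an assumption rather than a verbatim quote.

/* Paraphrase (from memory) of the dst output helpers of this kernel
 * generation; skb_dst(skb)->output typically points at ip_output(),
 * ip_mc_output(), or an xfrm/tunnel equivalent.
 */
static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(sk, skb);
}

static inline int dst_output(struct sk_buff *skb)
{
	return dst_output_sk(skb->sk, skb);
}
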
370 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) in ip_queue_xmit() argument
372 struct inet_sock *inet = inet_sk(sk); in ip_queue_xmit()
390 rt = (struct rtable *)__sk_dst_check(sk, 0); in ip_queue_xmit()
403 rt = ip_route_output_ports(sock_net(sk), fl4, sk, in ip_queue_xmit()
407 sk->sk_protocol, in ip_queue_xmit()
408 RT_CONN_FLAGS(sk), in ip_queue_xmit()
409 sk->sk_bound_dev_if); in ip_queue_xmit()
412 sk_setup_caps(sk, &rt->dst); in ip_queue_xmit()
425 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) in ip_queue_xmit()
430 iph->protocol = sk->sk_protocol; in ip_queue_xmit()
440 ip_select_ident_segs(sock_net(sk), skb, sk, in ip_queue_xmit()
444 skb->priority = sk->sk_priority; in ip_queue_xmit()
445 skb->mark = sk->sk_mark; in ip_queue_xmit()
453 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); in ip_queue_xmit()
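
ip_queue_xmit() is the connected-socket transmit entry point: it revalidates the cached route with __sk_dst_check() (or re-routes with ip_route_output_ports()), builds the IPv4 header from socket state and ends in ip_local_out(). Transports reach it through their af_ops queue_xmit callback; a hedged, illustrative wrapper showing that call shape follows (example_transmit is not a real kernel function).

/* Hedged sketch: handing a fully built transport segment to
 * ip_queue_xmit(). TCP does the equivalent through
 * icsk->icsk_af_ops->queue_xmit, which ipv4_specific points at
 * ip_queue_xmit(); this wrapper is illustrative only.
 */
#include <net/inet_sock.h>
#include <net/ip.h>

static int example_transmit(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	/* the flowi cached in the socket's cork describes the connected
	 * flow and is used if the route has to be rebuilt
	 */
	return ip_queue_xmit(sk, skb, &inet->cork.fl);
}
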
489 int ip_fragment(struct sock *sk, struct sk_buff *skb, in ip_fragment() argument
561 BUG_ON(frag->sk); in ip_fragment()
562 if (skb->sk) { in ip_fragment()
563 frag->sk = skb->sk; in ip_fragment()
603 err = output(sk, skb); in ip_fragment()
632 frag2->sk = NULL; in ip_fragment()
693 if (skb->sk) in ip_fragment()
694 skb_set_owner_w(skb2, skb->sk); in ip_fragment()
740 err = output(sk, skb2); in ip_fragment()
786 static inline int ip_ufo_append_data(struct sock *sk, in ip_ufo_append_data() argument
802 skb = sock_alloc_send_skb(sk, in ip_ufo_append_data()
834 return skb_append_datato_frags(sk, skb, getfrag, from, in ip_ufo_append_data()
838 static int __ip_append_data(struct sock *sk, in __ip_append_data() argument
848 struct inet_sock *inet = inet_sk(sk); in __ip_append_data()
868 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) in __ip_append_data()
869 tskey = sk->sk_tskey++; in __ip_append_data()
875 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; in __ip_append_data()
878 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, in __ip_append_data()
895 (sk->sk_protocol == IPPROTO_UDP) && in __ip_append_data()
897 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { in __ip_append_data()
898 err = ip_ufo_append_data(sk, queue, getfrag, from, length, in __ip_append_data()
961 skb = sock_alloc_send_skb(sk, in __ip_append_data()
966 if (atomic_read(&sk->sk_wmem_alloc) <= in __ip_append_data()
967 2 * sk->sk_sndbuf) in __ip_append_data()
968 skb = sock_wmalloc(sk, in __ip_append_data()
970 sk->sk_allocation); in __ip_append_data()
1046 if (!sk_page_frag_refill(sk, pfrag)) in __ip_append_data()
1071 atomic_add(copy, &sk->sk_wmem_alloc); in __ip_append_data()
1083 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); in __ip_append_data()
1087 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, in ip_setup_cork() argument
1100 sk->sk_allocation); in ip_setup_cork()
1115 cork->fragsize = ip_sk_use_pmtu(sk) ? in ip_setup_cork()
1138 int ip_append_data(struct sock *sk, struct flowi4 *fl4, in ip_append_data() argument
1145 struct inet_sock *inet = inet_sk(sk); in ip_append_data()
1151 if (skb_queue_empty(&sk->sk_write_queue)) { in ip_append_data()
1152 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp); in ip_append_data()
1159 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, in ip_append_data()
1160 sk_page_frag(sk), getfrag, in ip_append_data()
1164 ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, in ip_append_page() argument
1167 struct inet_sock *inet = inet_sk(sk); in ip_append_page()
1184 if (skb_queue_empty(&sk->sk_write_queue)) in ip_append_page()
1200 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; in ip_append_page()
1203 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, in ip_append_page()
1208 skb = skb_peek_tail(&sk->sk_write_queue); in ip_append_page()
1214 (sk->sk_protocol == IPPROTO_UDP) && in ip_append_page()
1240 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); in ip_append_page()
1273 __skb_queue_tail(&sk->sk_write_queue, skb); in ip_append_page()
1299 atomic_add(len, &sk->sk_wmem_alloc); in ip_append_page()
1307 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); in ip_append_page()
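
ip_append_page() is the zero-copy sibling of ip_append_data(): it appends page fragments to the tail skb of the socket's corked write queue instead of copying from user memory. Below is a hedged sketch of a sendpage-style caller, loosely modelled on UDP's use of it; corking, locking and error handling are elided.

/* Hedged sketch: append a page fragment to an already corked IPv4
 * flow, loosely modelled on udp_sendpage(). Assumes the cork was set up
 * by a preceding ip_append_data() call; lock_sock() is elided.
 */
#include <net/inet_sock.h>
#include <net/ip.h>

static ssize_t example_sendpage(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);

	/* the flowi4 of the corked flow lives in the socket's cork */
	return ip_append_page(sk, &inet->cork.fl.u.ip4,
			      page, offset, size, flags);
}
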
1324 struct sk_buff *__ip_make_skb(struct sock *sk, in __ip_make_skb() argument
1331 struct inet_sock *inet = inet_sk(sk); in __ip_make_skb()
1332 struct net *net = sock_net(sk); in __ip_make_skb()
1355 tmp_skb->sk = NULL; in __ip_make_skb()
1362 skb->ignore_df = ip_sk_ignore_df(sk); in __ip_make_skb()
1370 ip_dont_fragment(sk, &rt->dst))) in __ip_make_skb()
1389 iph->protocol = sk->sk_protocol; in __ip_make_skb()
1391 ip_select_ident(net, skb, sk); in __ip_make_skb()
1398 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; in __ip_make_skb()
1399 skb->mark = sk->sk_mark; in __ip_make_skb()
1431 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) in ip_push_pending_frames() argument
1435 skb = ip_finish_skb(sk, fl4); in ip_push_pending_frames()
1440 return ip_send_skb(sock_net(sk), skb); in ip_push_pending_frames()
1446 static void __ip_flush_pending_frames(struct sock *sk, in __ip_flush_pending_frames() argument
1458 void ip_flush_pending_frames(struct sock *sk) in ip_flush_pending_frames() argument
1460 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base); in ip_flush_pending_frames()
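
ip_append_data(), ip_push_pending_frames() and ip_flush_pending_frames() form the classic corked send cycle: append under the socket lock, then either push the queued data out as IP packets or flush it on error, as ICMP and corked UDP do. A hedged sketch of that cycle follows; the getfrag callback, flow, control-message cookie and route are assumed to have been prepared by the caller.

/* Hedged sketch of the cork/append/push cycle. getfrag, from, length,
 * ipc and rtp are assumed to come from the caller's routing and cmsg
 * handling; example_corked_send itself is illustrative.
 */
#include <net/ip.h>
#include <net/route.h>
#include <net/sock.h>

static int example_corked_send(struct sock *sk, struct flowi4 *fl4,
			       int (*getfrag)(void *, char *, int, int, int,
					      struct sk_buff *),
			       void *from, int length,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	lock_sock(sk);
	err = ip_append_data(sk, fl4, getfrag, from, length,
			     0 /* transhdrlen */, ipc, rtp, 0 /* flags */);
	if (err)
		ip_flush_pending_frames(sk);		/* drop what was queued */
	else
		err = ip_push_pending_frames(sk, fl4);	/* build and send */
	release_sock(sk);

	return err;
}
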
1463 struct sk_buff *ip_make_skb(struct sock *sk, in ip_make_skb() argument
1483 err = ip_setup_cork(sk, &cork, ipc, rtp); in ip_make_skb()
1487 err = __ip_append_data(sk, fl4, &queue, &cork, in ip_make_skb()
1491 __ip_flush_pending_frames(sk, &queue, &cork); in ip_make_skb()
1495 return __ip_make_skb(sk, fl4, &queue, &cork); in ip_make_skb()
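
ip_make_skb() runs the same append machinery as ip_append_data(), but against a private queue and cork, so an uncorked datagram can be built as a single skb without touching sk->sk_write_queue; the caller finishes the transport header and sends the result itself. A hedged sketch follows, loosely modelled on the UDP fast path; the wrapper and the omitted transport-header step are illustrative.

/* Hedged sketch: build an uncorked datagram with ip_make_skb() and send
 * it with ip_send_skb(). UDP does the equivalent in udp_sendmsg(),
 * filling in the UDP header and checksum between the two steps; that
 * part is only hinted at here.
 */
#include <linux/err.h>
#include <net/ip.h>
#include <net/sock.h>

static int example_send_dgram(struct sock *sk, struct flowi4 *fl4,
			      int (*getfrag)(void *, char *, int, int, int,
					     struct sk_buff *),
			      void *from, int length, int transhdrlen,
			      struct ipcm_cookie *ipc, struct rtable **rtp,
			      unsigned int flags)
{
	struct sk_buff *skb;

	skb = ip_make_skb(sk, fl4, getfrag, from, length, transhdrlen,
			  ipc, rtp, flags);
	if (IS_ERR_OR_NULL(skb))
		return PTR_ERR(skb);	/* NULL means nothing to send, PTR_ERR(NULL) == 0 */

	/* ... fill in the transport header (e.g. the UDP header) here ... */

	return ip_send_skb(sock_net(sk), skb);
}
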
1515 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, in ip_send_unicast_reply() argument
1525 struct net *net = sock_net(sk); in ip_send_unicast_reply()
1557 inet_sk(sk)->tos = arg->tos; in ip_send_unicast_reply()
1559 sk->sk_priority = skb->priority; in ip_send_unicast_reply()
1560 sk->sk_protocol = ip_hdr(skb)->protocol; in ip_send_unicast_reply()
1561 sk->sk_bound_dev_if = arg->bound_dev_if; in ip_send_unicast_reply()
1562 sk->sk_sndbuf = sysctl_wmem_default; in ip_send_unicast_reply()
1563 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, in ip_send_unicast_reply()
1566 ip_flush_pending_frames(sk); in ip_send_unicast_reply()
1570 nskb = skb_peek(&sk->sk_write_queue); in ip_send_unicast_reply()
1578 ip_push_pending_frames(sk, &fl4); in ip_send_unicast_reply()
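
ip_send_unicast_reply() reuses the whole corking machinery on a caller-supplied control socket (TCP uses per-cpu kernel sockets for RSTs and ACKs on connections it does not own): the hits above show it priming the socket's tos, priority, protocol, bound device and send buffer, then running ip_append_data() with ip_reply_glue_bits() and ip_push_pending_frames(). A heavily hedged sketch of a caller follows; everything past the (sk, skb) parameters of ip_send_unicast_reply() is reconstructed from memory and should be checked against the tree, and all helper names here are illustrative.

/* Heavily hedged sketch: send a reply to a received skb from a control
 * socket, in the style of TCP's RST/ACK generation. The parameter list
 * of ip_send_unicast_reply() beyond (sk, skb) is an assumption; verify
 * it against include/net/ip.h of your tree.
 */
#include <linux/ip.h>
#include <linux/string.h>
#include <net/ip.h>

static void example_reply(struct sock *reply_sk, struct sk_buff *rcv_skb,
			  const struct ip_options *reply_opt,
			  void *payload, unsigned int len)
{
	struct ip_reply_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = payload;	/* copied in by ip_reply_glue_bits() */
	arg.iov[0].iov_len  = len;
	arg.tos             = ip_hdr(rcv_skb)->tos;

	/* the reply goes back to the sender, so saddr/daddr of the
	 * received packet are swapped into the (daddr, saddr) arguments
	 */
	ip_send_unicast_reply(reply_sk, rcv_skb, reply_opt,
			      ip_hdr(rcv_skb)->saddr, ip_hdr(rcv_skb)->daddr,
			      &arg, len);
}
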