Lines matching refs:sk in net/ipv4/tcp_output.c (each entry: source line number, matched code, enclosing function; "argument"/"local" classifies the matched symbol)
68 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
74 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
75 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
84 tcp_rearm_rto(sk); in tcp_event_new_data_sent()
87 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, in tcp_event_new_data_sent()
97 static inline __u32 tcp_acceptable_seq(const struct sock *sk) in tcp_acceptable_seq() argument
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
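Nearly every match above begins by converting the generic sk with tcp_sk() or inet_csk(). A minimal userspace model of why those are plain casts: struct tcp_sock embeds the connection sock, which embeds struct sock, all at offset zero. Layers and fields are collapsed here for brevity, so this is a sketch of the layout idea, not kernel code.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's nested socket structs. */
    struct sock { int sk_state; };
    struct inet_connection_sock { struct sock sk; unsigned int icsk_rto; };
    struct tcp_sock { struct inet_connection_sock inet_conn; unsigned int snd_cwnd; };

    /* Valid because struct sock is the first member at every layer. */
    static struct tcp_sock *tcp_sk(struct sock *sk)
    {
        return (struct tcp_sock *)sk;
    }

    int main(void)
    {
        struct tcp_sock tp = { .snd_cwnd = 10 };
        struct sock *sk = (struct sock *)&tp;   /* callers pass the base layer */

        printf("cwnd seen through sk: %u\n", tcp_sk(sk)->snd_cwnd);
        return 0;
    }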
121 static __u16 tcp_advertise_mss(struct sock *sk) in tcp_advertise_mss() argument
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
124 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_advertise_mss()
141 static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) in tcp_cwnd_restart() argument
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
148 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
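The loop at line 153 is the congestion-window restart after an idle period: one halving per RTO elapsed, floored at the restart window. A self-contained model of that arithmetic, using millisecond inputs instead of jiffies and illustrative values:

    #include <stdio.h>

    static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
                                        int idle_ms, int rto_ms)
    {
        int delta = idle_ms;

        /* Mirrors the while loop in tcp_cwnd_restart(): halve once per RTO. */
        while ((delta -= rto_ms) > 0 && cwnd > restart_cwnd)
            cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
        /* cwnd 40, floor 10, idle 1 s with a 200 ms RTO: two halvings
         * reach the floor, so the connection restarts at cwnd 10. */
        printf("%u\n", cwnd_after_idle(40, 10, 1000, 200));
        return 0;
    }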
162 struct sock *sk) in tcp_event_data_sent() argument
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
166 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_event_data_sent()
170 tcp_cwnd_restart(sk, __sk_dst_get(sk)); in tcp_event_data_sent()
183 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) in tcp_event_ack_sent() argument
185 tcp_dec_quickack_mode(sk, pkts); in tcp_event_ack_sent()
186 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in tcp_event_ack_sent()
269 static u16 tcp_select_window(struct sock *sk) in tcp_select_window() argument
271 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
274 u32 new_win = __tcp_select_window(sk); in tcp_select_window()
286 NET_INC_STATS(sock_net(sk), in tcp_select_window()
308 NET_INC_STATS(sock_net(sk), in tcp_select_window()
311 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); in tcp_select_window()
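tcp_select_window() ends by shifting the chosen window right by the negotiated window scale before it is written into the 16-bit header field (that shift itself does not reference sk, so it is not among the matches above). A worked example of the scaling, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int free_space = 1u << 20;  /* 1 MiB of receive space */
        unsigned int rcv_wscale = 7;         /* scale negotiated at SYN time */

        unsigned short field = free_space >> rcv_wscale;  /* 8192 on the wire */
        printf("window field %u advertises %u bytes\n",
               field, (unsigned int)field << rcv_wscale);
        return 0;
    }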
318 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
320 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
325 else if (tcp_ca_needs_ecn(sk)) in tcp_ecn_send_synack()
326 INET_ECN_xmit(sk); in tcp_ecn_send_synack()
330 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
332 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
333 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || in tcp_ecn_send_syn()
334 tcp_ca_needs_ecn(sk); in tcp_ecn_send_syn()
337 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_ecn_send_syn()
348 if (tcp_ca_needs_ecn(sk)) in tcp_ecn_send_syn()
349 INET_ECN_xmit(sk); in tcp_ecn_send_syn()
355 struct sock *sk) in tcp_ecn_make_synack() argument
359 if (tcp_ca_needs_ecn(sk)) in tcp_ecn_make_synack()
360 INET_ECN_xmit(sk); in tcp_ecn_make_synack()
367 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
370 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
376 INET_ECN_xmit(sk); in tcp_ecn_send()
382 } else if (!tcp_ca_needs_ecn(sk)) { in tcp_ecn_send()
384 INET_ECN_dontxmit(sk); in tcp_ecn_send()
547 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
551 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options()
556 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
574 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
613 static unsigned int tcp_synack_options(struct sock *sk, in tcp_synack_options() argument
676 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
680 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options()
687 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
738 static void tcp_tsq_handler(struct sock *sk) in tcp_tsq_handler() argument
740 if ((1 << sk->sk_state) & in tcp_tsq_handler()
743 tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle, in tcp_tsq_handler()
759 struct sock *sk; in tcp_tasklet_func() local
769 sk = (struct sock *)tp; in tcp_tasklet_func()
770 bh_lock_sock(sk); in tcp_tasklet_func()
772 if (!sock_owned_by_user(sk)) { in tcp_tasklet_func()
773 tcp_tsq_handler(sk); in tcp_tasklet_func()
778 bh_unlock_sock(sk); in tcp_tasklet_func()
781 sk_free(sk); in tcp_tasklet_func()
796 void tcp_release_cb(struct sock *sk) in tcp_release_cb() argument
798 struct tcp_sock *tp = tcp_sk(sk); in tcp_release_cb()
810 tcp_tsq_handler(sk); in tcp_release_cb()
821 sock_release_ownership(sk); in tcp_release_cb()
824 tcp_write_timer_handler(sk); in tcp_release_cb()
825 __sock_put(sk); in tcp_release_cb()
828 tcp_delack_timer_handler(sk); in tcp_release_cb()
829 __sock_put(sk); in tcp_release_cb()
832 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
833 __sock_put(sk); in tcp_release_cb()
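tcp_tasklet_func() and tcp_release_cb() together implement the TSQ deferral contract visible above: softirq work that finds the socket owned by the user records a flag instead, and the flag is replayed when the lock is released. A userspace sketch of that pattern, with a single flag and no atomics (the kernel uses atomic tsq_flags and per-flag socket refcounts):

    #include <stdbool.h>
    #include <stdio.h>

    #define TSQ_DEFERRED (1u << 0)

    struct model_sock {
        bool owned_by_user;
        unsigned int tsq_flags;
    };

    static void tsq_handler(struct model_sock *sk)
    {
        (void)sk;
        printf("pushing pending segments\n");  /* stands in for tcp_write_xmit() */
    }

    static void tasklet_func(struct model_sock *sk)
    {
        if (!sk->owned_by_user)
            tsq_handler(sk);                /* fast path: run in softirq */
        else
            sk->tsq_flags |= TSQ_DEFERRED;  /* owner busy: defer */
    }

    static void release_cb(struct model_sock *sk)
    {
        if (sk->tsq_flags & TSQ_DEFERRED) {
            sk->tsq_flags &= ~TSQ_DEFERRED;
            tsq_handler(sk);                /* replay deferred work */
        }
    }

    int main(void)
    {
        struct model_sock sk = { .owned_by_user = true, .tsq_flags = 0 };

        tasklet_func(&sk);         /* user holds the lock: only a flag is set */
        sk.owned_by_user = false;
        release_cb(&sk);           /* prints once, from the deferred path */
        return 0;
    }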
859 struct sock *sk = skb->sk; in tcp_wfree() local
860 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree()
866 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc); in tcp_wfree()
892 sk_free(sk); in tcp_wfree()
906 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
909 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_transmit_skb()
932 inet = inet_sk(sk); in tcp_transmit_skb()
933 tp = tcp_sk(sk); in tcp_transmit_skb()
938 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in tcp_transmit_skb()
940 tcp_options_size = tcp_established_options(sk, skb, &opts, in tcp_transmit_skb()
945 tcp_ca_event(sk, CA_EVENT_TX_START); in tcp_transmit_skb()
954 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in tcp_transmit_skb()
960 skb->sk = sk; in tcp_transmit_skb()
962 skb_set_hash_from_sk(skb, sk); in tcp_transmit_skb()
963 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in tcp_transmit_skb()
980 th->window = htons(tcp_select_window(sk)); in tcp_transmit_skb()
998 tcp_ecn_send(sk, skb, tcp_header_size); in tcp_transmit_skb()
1003 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in tcp_transmit_skb()
1005 md5, sk, skb); in tcp_transmit_skb()
1009 icsk->icsk_af_ops->send_check(sk, skb); in tcp_transmit_skb()
1012 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); in tcp_transmit_skb()
1015 tcp_event_data_sent(tp, sk); in tcp_transmit_skb()
1018 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, in tcp_transmit_skb()
1031 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in tcp_transmit_skb()
1036 tcp_enter_cwr(sk); in tcp_transmit_skb()
1046 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1048 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb()
1053 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1054 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
1055 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1059 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_set_skb_tso_segs() argument
1077 shinfo->gso_type = sk->sk_gso_type; in tcp_set_skb_tso_segs()
1084 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, in tcp_adjust_fackets_out() argument
1087 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_fackets_out()
1099 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1101 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount()
1116 tcp_adjust_fackets_out(sk, skb, decr); in tcp_adjust_pcount()
1146 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, in tcp_fragment() argument
1149 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment()
1166 buff = sk_stream_alloc_skb(sk, nsize, gfp); in tcp_fragment()
1170 sk->sk_wmem_queued += buff->truesize; in tcp_fragment()
1171 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1209 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_fragment()
1210 tcp_set_skb_tso_segs(sk, buff, mss_now); in tcp_fragment()
1220 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1225 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
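When tcp_fragment() splits a queued segment at offset len, the new buff inherits the tail of the sequence range and is inserted after skb on the write queue (line 1225). The sequence bookkeeping as a worked example, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int seq = 1000, end_seq = 1000 + 2896; /* two 1448-byte MSS */
        unsigned int len = 1448;                        /* split point */

        unsigned int buff_seq = seq + len;  /* buff takes the tail: 2448 */
        unsigned int buff_end = end_seq;    /* ...through 3896 */
        end_seq = buff_seq;                 /* skb keeps only the head */

        printf("skb [%u,%u) buff [%u,%u)\n", seq, end_seq, buff_seq, buff_end);
        return 0;
    }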
1273 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1284 sk->sk_wmem_queued -= len; in tcp_trim_head()
1285 sk_mem_uncharge(sk, len); in tcp_trim_head()
1286 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_trim_head()
1290 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); in tcp_trim_head()
1296 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) in __tcp_mtu_to_mss() argument
1298 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss()
1299 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1309 const struct dst_entry *dst = __sk_dst_get(sk); in __tcp_mtu_to_mss()
1329 int tcp_mtu_to_mss(struct sock *sk, int pmtu) in tcp_mtu_to_mss() argument
1332 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1333 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
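The MTU/MSS conversions above are straight header arithmetic: __tcp_mtu_to_mss() strips the network and transport headers from the path MTU, and tcp_mtu_to_mss() additionally subtracts negotiated options. A worked IPv4 example, with header sizes assumed and timestamps as the only option:

    #include <stdio.h>

    enum {
        IPV4_HDR_LEN = 20,
        TCP_HDR_LEN  = 20,
        TCPOLEN_TSTAMP_ALIGNED = 12,  /* timestamp option, padded to 4 bytes */
    };

    int main(void)
    {
        int pmtu   = 1500;                              /* typical Ethernet MTU */
        int mss    = pmtu - IPV4_HDR_LEN - TCP_HDR_LEN; /* 1460 */
        int mss_ts = mss - TCPOLEN_TSTAMP_ALIGNED;      /* 1448 with timestamps */

        printf("base MSS %d, with timestamps %d\n", mss, mss_ts);
        return 0;
    }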
1337 int tcp_mss_to_mtu(struct sock *sk, int mss) in tcp_mss_to_mtu() argument
1339 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu()
1340 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1350 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_mss_to_mtu()
1359 void tcp_mtup_init(struct sock *sk) in tcp_mtup_init() argument
1361 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init()
1362 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1363 struct net *net = sock_net(sk); in tcp_mtup_init()
1368 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); in tcp_mtup_init()
1397 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) in tcp_sync_mss() argument
1399 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss()
1400 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1406 mss_now = tcp_mtu_to_mss(sk, pmtu); in tcp_sync_mss()
1412 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1422 unsigned int tcp_current_mss(struct sock *sk) in tcp_current_mss() argument
1424 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss()
1425 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_current_mss()
1435 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1436 mss_now = tcp_sync_mss(sk, mtu); in tcp_current_mss()
1439 header_len = tcp_established_options(sk, NULL, &opts, &md5) + in tcp_current_mss()
1457 static void tcp_cwnd_application_limited(struct sock *sk) in tcp_cwnd_application_limited() argument
1459 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited()
1461 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1462 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1464 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1467 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1475 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) in tcp_cwnd_validate() argument
1477 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate()
1489 if (tcp_is_cwnd_limited(sk)) { in tcp_cwnd_validate()
1499 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) in tcp_cwnd_validate()
1500 tcp_cwnd_application_limited(sk); in tcp_cwnd_validate()
1544 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now) in tcp_tso_autosize() argument
1548 bytes = min(sk->sk_pacing_rate >> 10, in tcp_tso_autosize()
1549 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); in tcp_tso_autosize()
1558 return min_t(u32, segs, sk->sk_gso_max_segs); in tcp_tso_autosize()
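tcp_tso_autosize() sizes a TSO burst to roughly one millisecond of the socket's pacing rate: rate >> 10 approximates rate/1000 bytes, which is then converted to segments with a floor of two and the gso_max_segs cap seen at line 1558. A model with illustrative numbers (the helper name is mine):

    #include <stdio.h>

    static unsigned int tso_autosize(unsigned long pacing_rate_Bps,
                                     unsigned int mss, unsigned int gso_max_segs)
    {
        unsigned long bytes = pacing_rate_Bps >> 10;  /* ~1 ms worth of data */
        unsigned int segs = bytes / mss;

        if (segs < 2)
            segs = 2;              /* goal: at least two segments per burst */
        return segs < gso_max_segs ? segs : gso_max_segs;
    }

    int main(void)
    {
        /* 12 MB/s pacing, MSS 1448: ~11718 bytes per ms -> 8 segments. */
        printf("%u\n", tso_autosize(12000000UL, 1448, 65535));
        return 0;
    }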
1562 static unsigned int tcp_mss_split_point(const struct sock *sk, in tcp_mss_split_point() argument
1568 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point()
1574 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1622 static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_init_tso_segs() argument
1628 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_init_tso_segs()
1677 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, in tcp_snd_test() argument
1680 const struct tcp_sock *tp = tcp_sk(sk); in tcp_snd_test()
1683 tcp_init_tso_segs(sk, skb, cur_mss); in tcp_snd_test()
1696 bool tcp_may_send_now(struct sock *sk) in tcp_may_send_now() argument
1698 const struct tcp_sock *tp = tcp_sk(sk); in tcp_may_send_now()
1699 struct sk_buff *skb = tcp_send_head(sk); in tcp_may_send_now()
1702 tcp_snd_test(sk, skb, tcp_current_mss(sk), in tcp_may_send_now()
1703 (tcp_skb_is_last(sk, skb) ? in tcp_may_send_now()
1714 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1723 return tcp_fragment(sk, skb, len, mss_now, gfp); in tso_fragment()
1725 buff = sk_stream_alloc_skb(sk, 0, gfp); in tso_fragment()
1729 sk->sk_wmem_queued += buff->truesize; in tso_fragment()
1730 sk_mem_charge(sk, buff->truesize); in tso_fragment()
1752 tcp_set_skb_tso_segs(sk, skb, mss_now); in tso_fragment()
1753 tcp_set_skb_tso_segs(sk, buff, mss_now); in tso_fragment()
1757 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
1767 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
1770 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer()
1772 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer()
1805 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1828 head = tcp_write_queue_head(sk); in tcp_tso_should_defer()
1846 static inline void tcp_mtu_check_reprobe(struct sock *sk) in tcp_mtu_check_reprobe() argument
1848 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe()
1849 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe()
1850 struct net *net = sock_net(sk); in tcp_mtu_check_reprobe()
1857 int mss = tcp_current_mss(sk); in tcp_mtu_check_reprobe()
1864 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
1880 static int tcp_mtu_probe(struct sock *sk) in tcp_mtu_probe() argument
1882 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe()
1883 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe()
1885 struct net *net = sock_net(sk); in tcp_mtu_probe()
1899 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
1908 mss_now = tcp_current_mss(sk); in tcp_mtu_probe()
1909 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
1917 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
1922 tcp_mtu_check_reprobe(sk); in tcp_mtu_probe()
1944 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC); in tcp_mtu_probe()
1947 sk->sk_wmem_queued += nskb->truesize; in tcp_mtu_probe()
1948 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
1950 skb = tcp_send_head(sk); in tcp_mtu_probe()
1959 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
1962 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
1975 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
1976 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
1987 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_mtu_probe()
1997 tcp_init_tso_segs(sk, nskb, nskb->len); in tcp_mtu_probe()
2002 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { in tcp_mtu_probe()
2006 tcp_event_new_data_sent(sk, nskb); in tcp_mtu_probe()
2008 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
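tcp_mtu_probe() is a binary search over the path MTU: each probe uses the MSS corresponding to the midpoint of [search_low, search_high] (line 1909), and a successful probe raises the floor of the range while a loss lowers the ceiling. One midpoint step, worked through with illustrative bounds and IPv4 header sizes assumed:

    #include <stdio.h>

    int main(void)
    {
        int search_low = 1024, search_high = 1500;     /* current MTU bounds */
        int mid_mtu = (search_high + search_low) >> 1; /* probe MTU: 1262 */
        int probe_mss = mid_mtu - 20 - 20;             /* IPv4 + TCP headers */

        printf("probe MTU %d -> probe payload %d bytes\n", mid_mtu, probe_mss);
        return 0;
    }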
2032 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, in tcp_write_xmit() argument
2035 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit()
2047 result = tcp_mtu_probe(sk); in tcp_write_xmit()
2055 max_segs = tcp_tso_autosize(sk, mss_now); in tcp_write_xmit()
2056 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2059 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); in tcp_write_xmit()
2083 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2088 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2095 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2102 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2115 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); in tcp_write_xmit()
2118 if (atomic_read(&sk->sk_wmem_alloc) > limit) { in tcp_write_xmit()
2125 if (atomic_read(&sk->sk_wmem_alloc) > limit) in tcp_write_xmit()
2129 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2136 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2146 if (tcp_in_cwnd_reduction(sk)) in tcp_write_xmit()
2151 tcp_schedule_loss_probe(sk); in tcp_write_xmit()
2152 tcp_cwnd_validate(sk, is_cwnd_limited); in tcp_write_xmit()
2155 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); in tcp_write_xmit()
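Line 2115 is the TCP Small Queues cap: stop handing segments to the qdisc/NIC once skb memory still queued below the stack (sk_wmem_alloc) exceeds about one millisecond of the pacing rate, with a floor of two skbs' truesize. A model of the check, illustrative numbers only:

    #include <stdbool.h>
    #include <stdio.h>

    static bool tsq_throttled(unsigned int wmem_alloc, unsigned int skb_truesize,
                              unsigned long pacing_rate_Bps)
    {
        unsigned long limit = pacing_rate_Bps >> 10;  /* ~1 ms of data */

        if (limit < 2UL * skb_truesize)
            limit = 2UL * skb_truesize;               /* floor: two skbs */
        return wmem_alloc > limit;
    }

    int main(void)
    {
        /* 64 KiB already below the stack vs a 12 MB/s pacer (~11.7 KiB cap). */
        printf("%s\n", tsq_throttled(65536, 2048, 12000000UL)
                       ? "throttle" : "send");
        return 0;
    }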
2158 bool tcp_schedule_loss_probe(struct sock *sk) in tcp_schedule_loss_probe() argument
2160 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe()
2161 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe()
2169 tcp_rearm_rto(sk); in tcp_schedule_loss_probe()
2175 if (sk->sk_state == TCP_SYN_RECV) in tcp_schedule_loss_probe()
2186 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) in tcp_schedule_loss_probe()
2190 tcp_send_head(sk)) in tcp_schedule_loss_probe()
2204 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; in tcp_schedule_loss_probe()
2211 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, in tcp_schedule_loss_probe()
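The timeout armed at line 2211 is the tail loss probe timer: roughly 2*SRTT, stretched to cover a delayed ACK when only one packet is in flight, with a small floor. A millisecond model of that calculation (constants are illustrative; the kernel works in jiffies with TCP_DELACK_MAX):

    #include <stdio.h>

    enum { DELACK_MAX_MS = 200, TLP_FLOOR_MS = 10 };

    static unsigned int tlp_timeout_ms(unsigned int srtt_ms,
                                       unsigned int packets_out)
    {
        unsigned int timeout = srtt_ms << 1;                     /* ~2 * SRTT */
        unsigned int one_pkt = srtt_ms + (srtt_ms >> 1) + DELACK_MAX_MS;

        if (packets_out == 1 && timeout < one_pkt)
            timeout = one_pkt;      /* lone packet: wait out a delayed ACK */
        return timeout < TLP_FLOOR_MS ? TLP_FLOOR_MS : timeout;
    }

    int main(void)
    {
        /* 40 ms SRTT, one packet in flight: 40 + 20 + 200 = 260 ms, not 80. */
        printf("%u ms\n", tlp_timeout_ms(40, 1));
        return 0;
    }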
2221 static bool skb_still_in_host_queue(const struct sock *sk, in skb_still_in_host_queue() argument
2224 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2225 NET_INC_STATS_BH(sock_net(sk), in skb_still_in_host_queue()
2235 void tcp_send_loss_probe(struct sock *sk) in tcp_send_loss_probe() argument
2237 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe()
2240 int mss = tcp_current_mss(sk); in tcp_send_loss_probe()
2243 if (tcp_send_head(sk)) { in tcp_send_loss_probe()
2244 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); in tcp_send_loss_probe()
2253 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2257 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2265 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2268 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2274 err = __tcp_retransmit_skb(sk, skb); in tcp_send_loss_probe()
2281 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_send_loss_probe()
2282 inet_csk(sk)->icsk_rto, in tcp_send_loss_probe()
2286 NET_INC_STATS_BH(sock_net(sk), in tcp_send_loss_probe()
2294 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, in __tcp_push_pending_frames() argument
2301 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2304 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, in __tcp_push_pending_frames()
2305 sk_gfp_atomic(sk, GFP_ATOMIC))) in __tcp_push_pending_frames()
2306 tcp_check_probe_timer(sk); in __tcp_push_pending_frames()
2312 void tcp_push_one(struct sock *sk, unsigned int mss_now) in tcp_push_one() argument
2314 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one()
2318 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2373 u32 __tcp_select_window(struct sock *sk) in __tcp_select_window() argument
2375 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window()
2376 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window()
2384 int free_space = tcp_space(sk); in __tcp_select_window()
2385 int allowed_space = tcp_full_space(sk); in __tcp_select_window()
2395 if (sk_under_memory_pressure(sk)) in __tcp_select_window()
2452 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2454 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans()
2455 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); in tcp_collapse_retrans()
2463 tcp_highest_sack_combine(sk, next_skb, skb); in tcp_collapse_retrans()
2465 tcp_unlink_write_queue(next_skb, sk); in tcp_collapse_retrans()
2492 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); in tcp_collapse_retrans()
2494 sk_wmem_free_skb(sk, next_skb); in tcp_collapse_retrans()
2498 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2507 if (skb == tcp_send_head(sk)) in tcp_can_collapse()
2519 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, in tcp_retrans_try_collapse() argument
2522 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse()
2531 tcp_for_write_queue_from_safe(skb, tmp, sk) { in tcp_retrans_try_collapse()
2532 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2553 tcp_collapse_retrans(sk, to); in tcp_retrans_try_collapse()
2561 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in __tcp_retransmit_skb() argument
2563 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb()
2564 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb()
2576 if (atomic_read(&sk->sk_wmem_alloc) > in __tcp_retransmit_skb()
2577 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) in __tcp_retransmit_skb()
2580 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
2586 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2590 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
2593 cur_mss = tcp_current_mss(sk); in __tcp_retransmit_skb()
2605 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) in __tcp_retransmit_skb()
2613 tcp_init_tso_segs(sk, skb, cur_mss); in __tcp_retransmit_skb()
2614 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); in __tcp_retransmit_skb()
2618 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2632 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : in __tcp_retransmit_skb()
2635 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
2641 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); in __tcp_retransmit_skb()
2643 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in __tcp_retransmit_skb()
2649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_retransmit_skb() argument
2651 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb()
2652 int err = __tcp_retransmit_skb(sk, skb); in tcp_retransmit_skb()
2674 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); in tcp_retransmit_skb()
2686 static bool tcp_can_forward_retransmit(struct sock *sk) in tcp_can_forward_retransmit() argument
2688 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_can_forward_retransmit()
2689 const struct tcp_sock *tp = tcp_sk(sk); in tcp_can_forward_retransmit()
2707 if (tcp_may_send_now(sk)) in tcp_can_forward_retransmit()
2721 void tcp_xmit_retransmit_queue(struct sock *sk) in tcp_xmit_retransmit_queue() argument
2723 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue()
2724 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue()
2743 skb = tcp_write_queue_head(sk); in tcp_xmit_retransmit_queue()
2747 tcp_for_write_queue_from(skb, sk) { in tcp_xmit_retransmit_queue()
2750 if (skb == tcp_send_head(sk)) in tcp_xmit_retransmit_queue()
2774 if (!tcp_can_forward_retransmit(sk)) in tcp_xmit_retransmit_queue()
2800 if (tcp_retransmit_skb(sk, skb)) in tcp_xmit_retransmit_queue()
2803 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_xmit_retransmit_queue()
2805 if (tcp_in_cwnd_reduction(sk)) in tcp_xmit_retransmit_queue()
2808 if (skb == tcp_write_queue_head(sk)) in tcp_xmit_retransmit_queue()
2809 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_xmit_retransmit_queue()
2810 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
2820 static void sk_forced_wmem_schedule(struct sock *sk, int size) in sk_forced_wmem_schedule() argument
2824 if (size <= sk->sk_forward_alloc) in sk_forced_wmem_schedule()
2827 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in sk_forced_wmem_schedule()
2828 sk_memory_allocated_add(sk, amt, &status); in sk_forced_wmem_schedule()
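sk_forced_wmem_schedule() charges memory unconditionally so the FIN can always be queued, rounding the request up to whole accounting quanta (the sk_mem_pages() step implied between lines 2824 and 2827). The rounding, worked through with a 4096-byte SK_MEM_QUANTUM assumed:

    #include <stdio.h>

    #define SK_MEM_QUANTUM 4096  /* one page, on most configurations */

    int main(void)
    {
        int size = 704;  /* e.g. the FIN skb's truesize */
        int amt  = (size + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM; /* sk_mem_pages */

        printf("charge %d quantum(s) = %d bytes of forward alloc\n",
               amt, amt * SK_MEM_QUANTUM);
        return 0;
    }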
2834 void tcp_send_fin(struct sock *sk) in tcp_send_fin() argument
2836 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); in tcp_send_fin()
2837 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin()
2844 if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { in tcp_send_fin()
2849 if (!tcp_send_head(sk)) { in tcp_send_fin()
2860 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
2867 sk_forced_wmem_schedule(sk, skb->truesize); in tcp_send_fin()
2871 tcp_queue_skb(sk, skb); in tcp_send_fin()
2873 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); in tcp_send_fin()
2881 void tcp_send_active_reset(struct sock *sk, gfp_t priority) in tcp_send_active_reset() argument
2888 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
2894 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
2898 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
2899 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
2901 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); in tcp_send_active_reset()
2910 int tcp_send_synack(struct sock *sk) in tcp_send_synack() argument
2914 skb = tcp_write_queue_head(sk); in tcp_send_synack()
2924 tcp_unlink_write_queue(skb, sk); in tcp_send_synack()
2926 __tcp_add_write_queue_head(sk, nskb); in tcp_send_synack()
2927 sk_wmem_free_skb(sk, skb); in tcp_send_synack()
2928 sk->sk_wmem_queued += nskb->truesize; in tcp_send_synack()
2929 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
2934 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
2936 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
2948 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, in tcp_make_synack() argument
2954 struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack()
2961 skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC); in tcp_make_synack()
2985 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
2987 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, in tcp_make_synack()
2997 tcp_ecn_make_synack(req, th, sk); in tcp_make_synack()
3014 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); in tcp_make_synack()
3030 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) in tcp_ca_dst_init() argument
3032 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init()
3050 static void tcp_connect_init(struct sock *sk) in tcp_connect_init() argument
3052 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_connect_init()
3053 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init()
3063 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3071 tcp_mtup_init(sk); in tcp_connect_init()
3072 tcp_sync_mss(sk, dst_mtu(dst)); in tcp_connect_init()
3074 tcp_ca_dst_init(sk, dst); in tcp_connect_init()
3082 tcp_initialize_rcv_mss(sk); in tcp_connect_init()
3085 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3086 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3087 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3089 tcp_select_initial_window(tcp_full_space(sk), in tcp_connect_init()
3100 sk->sk_err = 0; in tcp_connect_init()
3101 sock_reset_flag(sk, SOCK_DONE); in tcp_connect_init()
3116 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; in tcp_connect_init()
3117 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3121 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3123 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb()
3128 __tcp_add_write_queue_tail(sk, skb); in tcp_connect_queue_skb()
3129 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
3130 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3142 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) in tcp_send_syn_data() argument
3144 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data()
3151 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, in tcp_send_syn_data()
3171 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - in tcp_send_syn_data()
3179 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation); in tcp_send_syn_data()
3201 tcp_connect_queue_skb(sk, syn_data); in tcp_send_syn_data()
3203 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3216 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); in tcp_send_syn_data()
3224 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3233 int tcp_connect(struct sock *sk) in tcp_connect() argument
3235 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect()
3239 tcp_connect_init(sk); in tcp_connect()
3242 tcp_finish_connect(sk, NULL); in tcp_connect()
3246 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); in tcp_connect()
3252 tcp_connect_queue_skb(sk, buff); in tcp_connect()
3253 tcp_ecn_send_syn(sk, buff); in tcp_connect()
3256 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3257 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3266 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); in tcp_connect()
3269 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_connect()
3270 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
3279 void tcp_send_delayed_ack(struct sock *sk) in tcp_send_delayed_ack() argument
3281 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack()
3285 tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); in tcp_send_delayed_ack()
3288 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack()
3322 tcp_send_ack(sk); in tcp_send_delayed_ack()
3331 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
3335 void tcp_send_ack(struct sock *sk) in tcp_send_ack() argument
3340 if (sk->sk_state == TCP_CLOSE) in tcp_send_ack()
3343 tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); in tcp_send_ack()
3349 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
3351 inet_csk_schedule_ack(sk); in tcp_send_ack()
3352 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in tcp_send_ack()
3353 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_send_ack()
3360 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); in tcp_send_ack()
3372 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
3387 static int tcp_xmit_probe_skb(struct sock *sk, int urgent) in tcp_xmit_probe_skb() argument
3389 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb()
3393 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_xmit_probe_skb()
3405 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); in tcp_xmit_probe_skb()
3408 void tcp_send_window_probe(struct sock *sk) in tcp_send_window_probe() argument
3410 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
3411 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
3412 tcp_xmit_probe_skb(sk, 0); in tcp_send_window_probe()
3417 int tcp_write_wakeup(struct sock *sk) in tcp_write_wakeup() argument
3419 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup()
3422 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
3425 skb = tcp_send_head(sk); in tcp_write_wakeup()
3428 unsigned int mss = tcp_current_mss(sk); in tcp_write_wakeup()
3442 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) in tcp_write_wakeup()
3445 tcp_set_skb_tso_segs(sk, skb, mss); in tcp_write_wakeup()
3448 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3450 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()
3454 tcp_xmit_probe_skb(sk, 1); in tcp_write_wakeup()
3455 return tcp_xmit_probe_skb(sk, 0); in tcp_write_wakeup()
3462 void tcp_send_probe0(struct sock *sk) in tcp_send_probe0() argument
3464 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0()
3465 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0()
3469 err = tcp_write_wakeup(sk); in tcp_send_probe0()
3471 if (tp->packets_out || !tcp_send_head(sk)) { in tcp_send_probe0()
3494 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_send_probe0()
3499 int tcp_rtx_synack(struct sock *sk, struct request_sock *req) in tcp_rtx_synack() argument
3505 res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL); in tcp_rtx_synack()
3507 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); in tcp_rtx_synack()
3508 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in tcp_rtx_synack()