Lines matching refs: sk — net/ipv4/tcp_input.c. Each entry gives the source line number, the matching code, the enclosing function, and an "argument" tag where sk is that function's parameter.

130 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)  in tcp_measure_rcv_mss()  argument
132 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
163 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
176 static void tcp_incr_quickack(struct sock *sk) in tcp_incr_quickack() argument
178 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
179 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
187 static void tcp_enter_quickack_mode(struct sock *sk) in tcp_enter_quickack_mode() argument
189 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
190 tcp_incr_quickack(sk); in tcp_enter_quickack_mode()
199 static inline bool tcp_in_quickack_mode(const struct sock *sk) in tcp_in_quickack_mode() argument
201 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
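The quick-ACK budget computed on line 179 above is simply the number of full-sized segments that fit into half the receive window. Below is a minimal userspace sketch of that ratio, assuming the kernel's floor of 2 and a cap of 16 (TCP_MAX_QUICKACKS at the time); quickack_budget() is an illustrative name, not a kernel symbol.

    #include <stdio.h>

    /* Stand-alone version of the ratio used by tcp_incr_quickack(): how many
     * immediate ACKs cover roughly half a receive window. The floor of 2 and
     * cap of 16 are assumptions of this sketch. rcv_mss > 0 is assumed. */
    static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
    {
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
            quickacks = 2;
        if (quickacks > 16)
            quickacks = 16;
        return quickacks;
    }

    int main(void)
    {
        /* 64 KB window, 1460-byte MSS: ~22 segments, capped to 16 */
        printf("%u\n", quickack_budget(65535, 1460));
        return 0;
    }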
283 static void tcp_sndbuf_expand(struct sock *sk) in tcp_sndbuf_expand() argument
285 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
308 if (sk->sk_sndbuf < sndmem) in tcp_sndbuf_expand()
309 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); in tcp_sndbuf_expand()
338 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
340 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
347 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
355 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
357 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
361 (int)tp->rcv_ssthresh < tcp_space(sk) && in tcp_grow_window()
362 !sk_under_memory_pressure(sk)) { in tcp_grow_window()
371 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
377 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
383 static void tcp_fixup_rcvbuf(struct sock *sk) in tcp_fixup_rcvbuf() argument
385 u32 mss = tcp_sk(sk)->advmss; in tcp_fixup_rcvbuf()
397 if (sk->sk_rcvbuf < rcvmem) in tcp_fixup_rcvbuf()
398 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); in tcp_fixup_rcvbuf()
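tcp_sndbuf_expand() (line 309) and tcp_fixup_rcvbuf() (line 398) share one pattern: grow the socket buffer toward a computed target, never above the tcp_wmem[2] / tcp_rmem[2] ceiling, and only when the application has not pinned the size via SO_SNDBUF / SO_RCVBUF (the SOCK_*BUF_LOCK checks on the following lines). A small sketch of that clamp, with illustrative names only:

    #include <stdio.h>

    /* Grow a buffer toward 'wanted' without exceeding the sysctl ceiling;
     * 'user_locked' stands in for the SOCK_SNDBUF_LOCK / SOCK_RCVBUF_LOCK
     * test. All names are local to this sketch. */
    static unsigned int grow_buffer(unsigned int cur, unsigned int wanted,
                                    unsigned int sysctl_max, int user_locked)
    {
        if (user_locked || cur >= wanted)
            return cur;                              /* leave it alone */
        return wanted < sysctl_max ? wanted : sysctl_max;
    }

    int main(void)
    {
        /* wants 6 MB but the (example) sysctl ceiling is 4 MB */
        printf("%u\n", grow_buffer(87380, 6 << 20, 4 << 20, 0));
        return 0;
    }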
404 void tcp_init_buffer_space(struct sock *sk) in tcp_init_buffer_space() argument
406 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
409 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) in tcp_init_buffer_space()
410 tcp_fixup_rcvbuf(sk); in tcp_init_buffer_space()
411 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) in tcp_init_buffer_space()
412 tcp_sndbuf_expand(sk); in tcp_init_buffer_space()
418 maxwin = tcp_full_space(sk); in tcp_init_buffer_space()
440 static void tcp_clamp_window(struct sock *sk) in tcp_clamp_window() argument
442 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window()
443 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
447 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && in tcp_clamp_window()
448 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && in tcp_clamp_window()
449 !sk_under_memory_pressure(sk) && in tcp_clamp_window()
450 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { in tcp_clamp_window()
451 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), in tcp_clamp_window()
454 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
465 void tcp_initialize_rcv_mss(struct sock *sk) in tcp_initialize_rcv_mss() argument
467 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss()
474 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
538 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, in tcp_rcv_rtt_measure_ts() argument
541 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts()
544 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
552 void tcp_rcv_space_adjust(struct sock *sk) in tcp_rcv_space_adjust() argument
554 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust()
577 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in tcp_rcv_space_adjust()
604 if (rcvbuf > sk->sk_rcvbuf) { in tcp_rcv_space_adjust()
605 sk->sk_rcvbuf = rcvbuf; in tcp_rcv_space_adjust()
628 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
630 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv()
631 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv()
634 inet_csk_schedule_ack(sk); in tcp_event_data_recv()
636 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
646 tcp_incr_quickack(sk); in tcp_event_data_recv()
662 tcp_incr_quickack(sk); in tcp_event_data_recv()
663 sk_mem_reclaim(sk); in tcp_event_data_recv()
671 tcp_grow_window(sk, skb); in tcp_event_data_recv()
683 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) in tcp_rtt_estimator() argument
685 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator()
734 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
740 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
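tcp_rtt_estimator() maintains the smoothed RTT and its deviation that later feed the RTO (see tcp_set_rto() below). The kernel keeps srtt_us scaled by 8 and tracks mdev/mdev_max/rttvar as on lines 734 and 740 above; the sketch below is the plain RFC 6298 exponential smoothing in microseconds, not a line-for-line copy of the kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    /* RFC 6298-style estimator: srtt = 7/8*srtt + 1/8*sample,
     * rttvar = 3/4*rttvar + 1/4*|srtt - sample|. Units: microseconds.
     * Names are local to this sketch. */
    struct rtt_est { long srtt_us, rttvar_us; int init; };

    static void rtt_sample(struct rtt_est *e, long m_us)
    {
        if (!e->init) {                      /* first measurement */
            e->srtt_us = m_us;
            e->rttvar_us = m_us / 2;
            e->init = 1;
            return;
        }
        e->rttvar_us += (labs(e->srtt_us - m_us) - e->rttvar_us) / 4;
        e->srtt_us   += (m_us - e->srtt_us) / 8;
    }

    int main(void)
    {
        struct rtt_est e = { 0, 0, 0 };
        long samples[] = { 100000, 120000, 80000, 300000 };  /* 100 ms, ... */
        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            rtt_sample(&e, samples[i]);
            printf("srtt=%ldus rttvar=%ldus\n", e.srtt_us, e.rttvar_us);
        }
        return 0;
    }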
753 static void tcp_update_pacing_rate(struct sock *sk) in tcp_update_pacing_rate() argument
755 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate()
770 ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, in tcp_update_pacing_rate()
771 sk->sk_max_pacing_rate); in tcp_update_pacing_rate()
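The pacing rate stored on lines 770-771 is derived from the current cwnd and the smoothed RTT, then capped by sk_max_pacing_rate. A rough userspace sketch of that relationship follows; the headroom multiplier of 2 is treated as an assumption of the sketch, as are all names.

    #include <stdio.h>
    #include <stdint.h>

    /* Approximate pacing rate in bytes/sec: (factor * mss * cwnd) / srtt,
     * capped at the socket's maximum pacing rate. */
    static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd, uint32_t srtt_us,
                                uint64_t max_rate)
    {
        uint64_t rate;

        if (!srtt_us)
            return max_rate;                     /* no RTT sample yet */
        rate = 2ULL * mss * cwnd * 1000000ULL / srtt_us;
        return rate < max_rate ? rate : max_rate;
    }

    int main(void)
    {
        /* 1460-byte MSS, cwnd of 10, 50 ms RTT -> ~584 KB/s before the cap */
        printf("%llu bytes/sec\n",
               (unsigned long long)pacing_rate(1460, 10, 50000, UINT64_MAX));
        return 0;
    }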
777 static void tcp_set_rto(struct sock *sk) in tcp_set_rto() argument
779 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto()
790 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
801 tcp_bound_rto(sk); in tcp_set_rto()
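__tcp_set_rto() (line 790) combines the two estimator outputs into the retransmission timeout and tcp_bound_rto() (line 801) caps it. Here is a sketch of the classic RTO = srtt + 4*rttvar rule; the 200 ms floor and 120 s ceiling mirror the kernel's TCP_RTO_MIN / TCP_RTO_MAX but are applied in one place for brevity.

    #include <stdio.h>

    /* RTO per RFC 6298: srtt + 4 * rttvar, clamped. Units: microseconds.
     * The bounds are assumptions of this sketch. */
    static long rto_us(long srtt_us, long rttvar_us)
    {
        long rto = srtt_us + 4 * rttvar_us;

        if (rto < 200000)
            rto = 200000;          /* 200 ms floor */
        if (rto > 120000000)
            rto = 120000000;       /* 120 s ceiling */
        return rto;
    }

    int main(void)
    {
        /* srtt = 100 ms, rttvar = 50 ms -> RTO = 300 ms */
        printf("%ld us\n", rto_us(100000, 50000));
        return 0;
    }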
831 static void tcp_update_reordering(struct sock *sk, const int metric, in tcp_update_reordering() argument
834 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_reordering()
850 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_update_reordering()
853 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_update_reordering()
1040 static void tcp_mark_lost_retrans(struct sock *sk) in tcp_mark_lost_retrans() argument
1042 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mark_lost_retrans()
1043 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_lost_retrans()
1054 tcp_for_write_queue(skb, sk) { in tcp_mark_lost_retrans()
1057 if (skb == tcp_send_head(sk)) in tcp_mark_lost_retrans()
1083 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); in tcp_mark_lost_retrans()
1095 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, in tcp_check_dsack() argument
1099 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack()
1107 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1116 NET_INC_STATS_BH(sock_net(sk), in tcp_check_dsack()
1145 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1183 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
1192 static u8 tcp_sacktag_one(struct sock *sk, in tcp_sacktag_one() argument
1198 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one()
1282 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1287 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb()
1288 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1300 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1321 skb_shinfo(prev)->gso_type = sk->sk_gso_type; in tcp_shifted_skb()
1335 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1352 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1353 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1355 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1356 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1358 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1380 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1385 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data()
1392 if (!sk_can_gso(sk)) in tcp_shift_skb_data()
1406 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1408 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1484 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1490 if (prev == tcp_write_queue_tail(sk)) in tcp_shift_skb_data()
1492 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1495 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1503 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); in tcp_shift_skb_data()
1514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1518 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1524 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk()
1527 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1531 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1540 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1552 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1562 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1573 tcp_sacktag_one(sk, in tcp_sacktag_walk()
1584 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1595 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1599 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1600 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1612 struct sock *sk, in tcp_maybe_skipping_dsack() argument
1621 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1622 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1636 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, in tcp_sacktag_write_queue() argument
1639 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue()
1660 tcp_highest_sack_reset(sk); in tcp_sacktag_write_queue()
1663 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, in tcp_sacktag_write_queue()
1704 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
1730 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1765 skb = tcp_sacktag_skip(skb, sk, &state, in tcp_sacktag_write_queue()
1767 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1778 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1785 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1793 skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); in tcp_sacktag_write_queue()
1800 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1805 skb = tcp_sacktag_skip(skb, sk, &state, start_seq); in tcp_sacktag_write_queue()
1808 skb = tcp_sacktag_walk(skb, sk, next_dup, &state, in tcp_sacktag_write_queue()
1824 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) in tcp_sacktag_write_queue()
1825 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); in tcp_sacktag_write_queue()
1827 tcp_mark_lost_retrans(sk); in tcp_sacktag_write_queue()
1862 static void tcp_check_reno_reordering(struct sock *sk, const int addend) in tcp_check_reno_reordering() argument
1864 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering()
1866 tcp_update_reordering(sk, tp->packets_out + addend, 0); in tcp_check_reno_reordering()
1871 static void tcp_add_reno_sack(struct sock *sk) in tcp_add_reno_sack() argument
1873 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack()
1875 tcp_check_reno_reordering(sk, 0); in tcp_add_reno_sack()
1881 static void tcp_remove_reno_sacks(struct sock *sk, int acked) in tcp_remove_reno_sacks() argument
1883 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks()
1892 tcp_check_reno_reordering(sk, acked); in tcp_remove_reno_sacks()
1922 void tcp_enter_loss(struct sock *sk) in tcp_enter_loss() argument
1924 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss()
1925 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss()
1935 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1936 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1937 tcp_ca_event(sk, CA_EVENT_LOSS); in tcp_enter_loss()
1950 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
1953 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_enter_loss()
1959 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
1960 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
1980 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_enter_loss()
1990 !inet_csk(sk)->icsk_mtup.probe_size; in tcp_enter_loss()
2003 static bool tcp_check_sack_reneging(struct sock *sk, int flag) in tcp_check_sack_reneging() argument
2006 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging()
2010 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_check_sack_reneging()
2042 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) in tcp_pause_early_retransmit() argument
2044 struct tcp_sock *tp = tcp_sk(sk); in tcp_pause_early_retransmit()
2058 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) in tcp_pause_early_retransmit()
2061 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, in tcp_pause_early_retransmit()
2159 static bool tcp_time_to_recover(struct sock *sk, int flag) in tcp_time_to_recover() argument
2161 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover()
2178 !tcp_may_send_now(sk)) { in tcp_time_to_recover()
2192 tcp_is_sack(tp) && !tcp_send_head(sk)) in tcp_time_to_recover()
2202 !tcp_may_send_now(sk)) in tcp_time_to_recover()
2203 return !tcp_pause_early_retransmit(sk, flag); in tcp_time_to_recover()
2214 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) in tcp_mark_head_lost() argument
2216 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost()
2229 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2232 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2236 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2237 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2259 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, in tcp_mark_head_lost()
2276 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) in tcp_update_scoreboard() argument
2278 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard()
2281 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2286 tcp_mark_head_lost(sk, lost, 0); in tcp_update_scoreboard()
2290 tcp_mark_head_lost(sk, sacked_upto, 0); in tcp_update_scoreboard()
2292 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2332 static bool tcp_any_retrans_done(const struct sock *sk) in tcp_any_retrans_done() argument
2334 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done()
2340 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2348 static void DBGUNDO(struct sock *sk, const char *msg) in DBGUNDO() argument
2350 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO()
2351 struct inet_sock *inet = inet_sk(sk); in DBGUNDO()
2353 if (sk->sk_family == AF_INET) { in DBGUNDO()
2362 else if (sk->sk_family == AF_INET6) { in DBGUNDO()
2363 struct ipv6_pinfo *np = inet6_sk(sk); in DBGUNDO()
2377 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) in tcp_undo_cwnd_reduction() argument
2379 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction()
2384 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2385 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2394 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction()
2397 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2418 static bool tcp_try_undo_recovery(struct sock *sk) in tcp_try_undo_recovery() argument
2420 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery()
2428 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); in tcp_try_undo_recovery()
2429 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_recovery()
2430 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) in tcp_try_undo_recovery()
2435 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2442 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_recovery()
2446 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_recovery()
2451 static bool tcp_try_undo_dsack(struct sock *sk) in tcp_try_undo_dsack() argument
2453 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack()
2456 DBGUNDO(sk, "D-SACK"); in tcp_try_undo_dsack()
2457 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_dsack()
2458 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2465 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) in tcp_try_undo_loss() argument
2467 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss()
2470 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_loss()
2472 DBGUNDO(sk, "partial loss"); in tcp_try_undo_loss()
2473 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2475 NET_INC_STATS_BH(sock_net(sk), in tcp_try_undo_loss()
2477 inet_csk(sk)->icsk_retransmits = 0; in tcp_try_undo_loss()
2479 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_loss()
2495 static void tcp_init_cwnd_reduction(struct sock *sk) in tcp_init_cwnd_reduction() argument
2497 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction()
2505 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2509 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, in tcp_cwnd_reduction() argument
2512 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction()
2533 static inline void tcp_end_cwnd_reduction(struct sock *sk) in tcp_end_cwnd_reduction() argument
2535 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction()
2538 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || in tcp_end_cwnd_reduction()
2543 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); in tcp_end_cwnd_reduction()
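tcp_init_cwnd_reduction(), tcp_cwnd_reduction() and tcp_end_cwnd_reduction() implement the cwnd back-off shared by CWR and Recovery, following the proportional rate reduction idea of RFC 6937: shrink cwnd gradually, in proportion to the data actually delivered, rather than halving it in one step. The sketch below is the RFC's algorithm in packet units, not a line-for-line copy of the kernel; all names are local to the sketch.

    #include <stdio.h>

    /* Proportional Rate Reduction (RFC 6937), packet units. */
    struct prr {
        unsigned int prior_cwnd;     /* cwnd when the reduction started */
        unsigned int ssthresh;       /* target cwnd after the reduction */
        unsigned int prr_delivered;  /* packets delivered since then */
        unsigned int prr_out;        /* packets sent since then */
    };

    static unsigned int prr_cwnd_on_ack(struct prr *s, unsigned int pipe,
                                        unsigned int newly_delivered,
                                        int fast_rexmit)
    {
        unsigned int sndcnt;

        s->prr_delivered += newly_delivered;
        if (pipe > s->ssthresh) {
            /* still above target: release in proportion to deliveries */
            unsigned long long dividend =
                (unsigned long long)s->prr_delivered * s->ssthresh +
                s->prior_cwnd - 1;
            sndcnt = (unsigned int)(dividend / s->prior_cwnd);
            sndcnt = sndcnt > s->prr_out ? sndcnt - s->prr_out : 0;
        } else {
            /* at/below target: slow-start back up toward ssthresh */
            unsigned int limit = s->prr_delivered > s->prr_out ?
                                 s->prr_delivered - s->prr_out : 0;
            if (newly_delivered > limit)
                limit = newly_delivered;
            limit += 1;
            sndcnt = s->ssthresh - pipe < limit ? s->ssthresh - pipe : limit;
        }
        if (fast_rexmit && sndcnt < 1)
            sndcnt = 1;              /* always let the fast retransmit out */
        return pipe + sndcnt;        /* new cwnd */
    }

    int main(void)
    {
        /* cwnd was 20, target 10; one ACK delivers 2 packets, pipe is 18 */
        struct prr s = { 20, 10, 0, 0 };
        printf("new cwnd = %u\n", prr_cwnd_on_ack(&s, 18, 2, 1));
        return 0;
    }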
2547 void tcp_enter_cwr(struct sock *sk) in tcp_enter_cwr() argument
2549 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr()
2552 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_cwr()
2554 tcp_init_cwnd_reduction(sk); in tcp_enter_cwr()
2555 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_enter_cwr()
2559 static void tcp_try_keep_open(struct sock *sk) in tcp_try_keep_open() argument
2561 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open()
2564 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2567 if (inet_csk(sk)->icsk_ca_state != state) { in tcp_try_keep_open()
2568 tcp_set_ca_state(sk, state); in tcp_try_keep_open()
2573 static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) in tcp_try_to_open() argument
2575 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open()
2579 if (!tcp_any_retrans_done(sk)) in tcp_try_to_open()
2583 tcp_enter_cwr(sk); in tcp_try_to_open()
2585 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { in tcp_try_to_open()
2586 tcp_try_keep_open(sk); in tcp_try_to_open()
2588 tcp_cwnd_reduction(sk, prior_unsacked, 0); in tcp_try_to_open()
2592 static void tcp_mtup_probe_failed(struct sock *sk) in tcp_mtup_probe_failed() argument
2594 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed()
2600 static void tcp_mtup_probe_success(struct sock *sk) in tcp_mtup_probe_success() argument
2602 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success()
2603 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success()
2606 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2608 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2612 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2616 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
2623 void tcp_simple_retransmit(struct sock *sk) in tcp_simple_retransmit() argument
2625 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit()
2626 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit()
2628 unsigned int mss = tcp_current_mss(sk); in tcp_simple_retransmit()
2631 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2632 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2661 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2664 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_simple_retransmit()
2666 tcp_xmit_retransmit_queue(sk); in tcp_simple_retransmit()
2670 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) in tcp_enter_recovery() argument
2672 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery()
2680 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_enter_recovery()
2685 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_recovery()
2687 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2688 tcp_init_cwnd_reduction(sk); in tcp_enter_recovery()
2690 tcp_set_ca_state(sk, TCP_CA_Recovery); in tcp_enter_recovery()
2696 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) in tcp_process_loss() argument
2698 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss()
2702 tcp_try_undo_loss(sk, false)) in tcp_process_loss()
2710 tcp_try_undo_loss(sk, true)) in tcp_process_loss()
2718 __tcp_push_pending_frames(sk, tcp_current_mss(sk), in tcp_process_loss()
2728 tcp_try_undo_recovery(sk); in tcp_process_loss()
2736 tcp_add_reno_sack(sk); in tcp_process_loss()
2740 tcp_xmit_retransmit_queue(sk); in tcp_process_loss()
2744 static bool tcp_try_undo_partial(struct sock *sk, const int acked, in tcp_try_undo_partial() argument
2747 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial()
2753 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); in tcp_try_undo_partial()
2761 tcp_cwnd_reduction(sk, prior_unsacked, 0); in tcp_try_undo_partial()
2765 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_partial()
2768 DBGUNDO(sk, "partial recovery"); in tcp_try_undo_partial()
2769 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_partial()
2770 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
2771 tcp_try_keep_open(sk); in tcp_try_undo_partial()
2788 static void tcp_fastretrans_alert(struct sock *sk, const int acked, in tcp_fastretrans_alert() argument
2792 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert()
2793 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert()
2809 if (tcp_check_sack_reneging(sk, flag)) in tcp_fastretrans_alert()
2826 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2827 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_fastretrans_alert()
2834 if (tcp_try_undo_recovery(sk)) in tcp_fastretrans_alert()
2836 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2846 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2848 if (tcp_try_undo_partial(sk, acked, prior_unsacked)) in tcp_fastretrans_alert()
2854 if (tcp_try_undo_dsack(sk)) { in tcp_fastretrans_alert()
2855 tcp_try_keep_open(sk); in tcp_fastretrans_alert()
2860 tcp_process_loss(sk, flag, is_dupack); in tcp_fastretrans_alert()
2869 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2873 tcp_try_undo_dsack(sk); in tcp_fastretrans_alert()
2875 if (!tcp_time_to_recover(sk, flag)) { in tcp_fastretrans_alert()
2876 tcp_try_to_open(sk, flag, prior_unsacked); in tcp_fastretrans_alert()
2884 tcp_mtup_probe_failed(sk); in tcp_fastretrans_alert()
2887 tcp_simple_retransmit(sk); in tcp_fastretrans_alert()
2892 tcp_enter_recovery(sk, (flag & FLAG_ECE)); in tcp_fastretrans_alert()
2897 tcp_update_scoreboard(sk, fast_rexmit); in tcp_fastretrans_alert()
2898 tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit); in tcp_fastretrans_alert()
2899 tcp_xmit_retransmit_queue(sk); in tcp_fastretrans_alert()
2902 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, in tcp_ack_update_rtt() argument
2905 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt()
2931 tcp_rtt_estimator(sk, seq_rtt_us); in tcp_ack_update_rtt()
2932 tcp_set_rto(sk); in tcp_ack_update_rtt()
2935 inet_csk(sk)->icsk_backoff = 0; in tcp_ack_update_rtt()
2940 static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp) in tcp_synack_rtt_meas() argument
2942 struct tcp_sock *tp = tcp_sk(sk); in tcp_synack_rtt_meas()
2952 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); in tcp_synack_rtt_meas()
2955 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cong_avoid() argument
2957 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid()
2959 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
2960 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; in tcp_cong_avoid()
2966 void tcp_rearm_rto(struct sock *sk) in tcp_rearm_rto() argument
2968 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto()
2969 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto()
2978 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); in tcp_rearm_rto()
2980 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rearm_rto()
2984 struct sk_buff *skb = tcp_write_queue_head(sk); in tcp_rearm_rto()
2994 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, in tcp_rearm_rto()
3002 void tcp_resume_early_retransmit(struct sock *sk) in tcp_resume_early_retransmit() argument
3004 struct tcp_sock *tp = tcp_sk(sk); in tcp_resume_early_retransmit()
3006 tcp_rearm_rto(sk); in tcp_resume_early_retransmit()
3012 tcp_enter_recovery(sk, false); in tcp_resume_early_retransmit()
3013 tcp_update_scoreboard(sk, 1); in tcp_resume_early_retransmit()
3014 tcp_xmit_retransmit_queue(sk); in tcp_resume_early_retransmit()
3018 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3020 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked()
3026 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3038 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3044 if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) in tcp_ack_tstamp()
3049 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) in tcp_ack_tstamp()
3050 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3057 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, in tcp_clean_rtx_queue() argument
3060 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3062 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue()
3075 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3080 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3088 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3139 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3140 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3159 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us); in tcp_clean_rtx_queue()
3163 = inet_csk(sk)->icsk_ca_ops; in tcp_clean_rtx_queue()
3165 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3168 tcp_mtup_probe_success(sk); in tcp_clean_rtx_queue()
3172 tcp_remove_reno_sacks(sk, pkts_acked); in tcp_clean_rtx_queue()
3178 tcp_update_reordering(sk, tp->fackets_out - reord, 0); in tcp_clean_rtx_queue()
3189 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); in tcp_clean_rtx_queue()
3198 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3206 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3227 static void tcp_ack_probe(struct sock *sk) in tcp_ack_probe() argument
3229 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe()
3230 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe()
3234 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3236 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); in tcp_ack_probe()
3243 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_ack_probe()
3248 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) in tcp_ack_is_dubious() argument
3251 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; in tcp_ack_is_dubious()
3255 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) in tcp_may_raise_cwnd() argument
3257 if (tcp_in_cwnd_reduction(sk)) in tcp_may_raise_cwnd()
3266 if (tcp_sk(sk)->reordering > sysctl_tcp_reordering) in tcp_may_raise_cwnd()
3311 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3314 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window()
3332 tcp_fast_path_check(sk); in tcp_ack_update_window()
3336 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); in tcp_ack_update_window()
3377 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) in tcp_send_challenge_ack() argument
3382 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack()
3386 if (tcp_oow_rate_limited(sock_net(sk), skb, in tcp_send_challenge_ack()
3398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); in tcp_send_challenge_ack()
3399 tcp_send_ack(sk); in tcp_send_challenge_ack()
3429 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) in tcp_process_tlp_ack() argument
3431 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack()
3443 tcp_init_cwnd_reduction(sk); in tcp_process_tlp_ack()
3444 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_process_tlp_ack()
3445 tcp_end_cwnd_reduction(sk); in tcp_process_tlp_ack()
3446 tcp_try_keep_open(sk); in tcp_process_tlp_ack()
3447 NET_INC_STATS_BH(sock_net(sk), in tcp_process_tlp_ack()
3456 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) in tcp_in_ack_event() argument
3458 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event()
3461 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3465 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3467 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack()
3468 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack()
3480 prefetchw(sk->sk_write_queue.next); in tcp_ack()
3488 tcp_send_challenge_ack(sk, skb); in tcp_ack()
3502 tcp_rearm_rto(sk); in tcp_ack()
3526 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); in tcp_ack()
3528 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
3537 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3540 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3551 tcp_in_ack_event(sk, ack_ev_flags); in tcp_ack()
3557 sk->sk_err_soft = 0; in tcp_ack()
3565 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, in tcp_ack()
3570 if (tcp_may_raise_cwnd(sk, flag)) in tcp_ack()
3571 tcp_cong_avoid(sk, ack, acked); in tcp_ack()
3573 if (tcp_ack_is_dubious(sk, flag)) { in tcp_ack()
3575 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3579 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3582 struct dst_entry *dst = __sk_dst_get(sk); in tcp_ack()
3588 tcp_schedule_loss_probe(sk); in tcp_ack()
3589 tcp_update_pacing_rate(sk); in tcp_ack()
3595 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3601 if (tcp_send_head(sk)) in tcp_ack()
3602 tcp_ack_probe(sk); in tcp_ack()
3605 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3609 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3617 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3619 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3623 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3864 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3866 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack()
3881 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
3884 static inline bool tcp_paws_discard(const struct sock *sk, in tcp_paws_discard() argument
3887 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard()
3890 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
3913 void tcp_reset(struct sock *sk) in tcp_reset() argument
3916 switch (sk->sk_state) { in tcp_reset()
3918 sk->sk_err = ECONNREFUSED; in tcp_reset()
3921 sk->sk_err = EPIPE; in tcp_reset()
3926 sk->sk_err = ECONNRESET; in tcp_reset()
3931 if (!sock_flag(sk, SOCK_DEAD)) in tcp_reset()
3932 sk->sk_error_report(sk); in tcp_reset()
3934 tcp_done(sk); in tcp_reset()
3951 static void tcp_fin(struct sock *sk) in tcp_fin() argument
3953 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin()
3956 inet_csk_schedule_ack(sk); in tcp_fin()
3958 sk->sk_shutdown |= RCV_SHUTDOWN; in tcp_fin()
3959 sock_set_flag(sk, SOCK_DONE); in tcp_fin()
3961 switch (sk->sk_state) { in tcp_fin()
3965 tcp_set_state(sk, TCP_CLOSE_WAIT); in tcp_fin()
3966 dst = __sk_dst_get(sk); in tcp_fin()
3968 inet_csk(sk)->icsk_ack.pingpong = 1; in tcp_fin()
3986 tcp_send_ack(sk); in tcp_fin()
3987 tcp_set_state(sk, TCP_CLOSING); in tcp_fin()
3991 tcp_send_ack(sk); in tcp_fin()
3992 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_fin()
3999 __func__, sk->sk_state); in tcp_fin()
4009 sk_mem_reclaim(sk); in tcp_fin()
4011 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_fin()
4012 sk->sk_state_change(sk); in tcp_fin()
4015 if (sk->sk_shutdown == SHUTDOWN_MASK || in tcp_fin()
4016 sk->sk_state == TCP_CLOSE) in tcp_fin()
4017 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); in tcp_fin()
4019 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in tcp_fin()
4036 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_set() argument
4038 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set()
4048 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_dsack_set()
4056 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_extend() argument
4058 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend()
4061 tcp_dsack_set(sk, seq, end_seq); in tcp_dsack_extend()
4066 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
4068 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack()
4072 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
4073 tcp_enter_quickack_mode(sk); in tcp_send_dupack()
4080 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4084 tcp_send_ack(sk); in tcp_send_dupack()
4115 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) in tcp_sack_new_ofo_skb() argument
4117 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb()
4204 static bool tcp_try_coalesce(struct sock *sk, in tcp_try_coalesce() argument
4220 atomic_add(delta, &sk->sk_rmem_alloc); in tcp_try_coalesce()
4221 sk_mem_charge(sk, delta); in tcp_try_coalesce()
4222 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4232 static void tcp_ofo_queue(struct sock *sk) in tcp_ofo_queue() argument
4234 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue()
4247 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4252 SOCK_DEBUG(sk, "ofo packet was already received\n"); in tcp_ofo_queue()
4256 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", in tcp_ofo_queue()
4260 tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_ofo_queue()
4261 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); in tcp_ofo_queue()
4264 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4266 tcp_fin(sk); in tcp_ofo_queue()
4272 static bool tcp_prune_ofo_queue(struct sock *sk);
4273 static int tcp_prune_queue(struct sock *sk);
4275 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4278 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
4279 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4281 if (tcp_prune_queue(sk) < 0) in tcp_try_rmem_schedule()
4284 if (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4285 if (!tcp_prune_ofo_queue(sk)) in tcp_try_rmem_schedule()
4288 if (!sk_rmem_schedule(sk, skb, size)) in tcp_try_rmem_schedule()
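tcp_try_rmem_schedule() shows the order of fallbacks when a new segment does not fit into receive memory: try to charge it, otherwise collapse the receive queue (tcp_prune_queue), then sacrifice the out-of-order queue (tcp_prune_ofo_queue), and only then drop the segment. A compact sketch of that decision order; the stub predicates are hypothetical stand-ins, not kernel functions.

    #include <stdio.h>

    /* Stubs standing in for sk_rmem_schedule(), tcp_prune_queue() and
     * tcp_prune_ofo_queue(); in this toy run the first charge attempt fails
     * and the segment only fits after the out-of-order queue is pruned. */
    static int charge_attempts;
    static int can_charge(void)   { return charge_attempts++ > 0; }
    static int prune_queue(void)  { return 0; }   /* 0 = made progress */
    static int prune_ofo(void)    { return 1; }   /* true = dropped something */

    /* Same fallback order as tcp_try_rmem_schedule(): charge, else collapse
     * the receive queue, else drop out-of-order data, else give up. */
    static int try_rmem_schedule(int over_rcvbuf)
    {
        if (!over_rcvbuf && can_charge())
            return 0;
        if (prune_queue() < 0)
            return -1;
        if (can_charge())
            return 0;
        if (!prune_ofo())
            return -1;
        return can_charge() ? 0 : -1;
    }

    int main(void)
    {
        printf("%s\n", try_rmem_schedule(1) == 0 ? "queued" : "dropped");
        return 0;
    }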
4295 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4297 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo()
4303 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4304 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4311 inet_csk_schedule_ack(sk); in tcp_data_queue_ofo()
4313 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4314 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", in tcp_data_queue_ofo()
4336 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { in tcp_data_queue_ofo()
4339 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4368 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4371 tcp_dsack_set(sk, seq, end_seq); in tcp_data_queue_ofo()
4376 tcp_dsack_set(sk, seq, in tcp_data_queue_ofo()
4400 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4405 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4407 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4413 tcp_sack_new_ofo_skb(sk, seq, end_seq); in tcp_data_queue_ofo()
4416 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4417 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4421 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4425 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_queue_rcv()
4429 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; in tcp_queue_rcv()
4430 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); in tcp_queue_rcv()
4432 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4433 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4438 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) in tcp_send_rcvq() argument
4456 &err, sk->sk_allocation); in tcp_send_rcvq()
4464 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4471 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4473 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4475 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4488 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4490 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue()
4515 sock_owned_by_user(sk) && !tp->urg_data) { in tcp_data_queue()
4526 tcp_rcv_space_adjust(sk); in tcp_data_queue()
4534 tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4537 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4541 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4543 tcp_fin(sk); in tcp_data_queue()
4546 tcp_ofo_queue(sk); in tcp_data_queue()
4552 inet_csk(sk)->icsk_ack.pingpong = 0; in tcp_data_queue()
4558 tcp_fast_path_check(sk); in tcp_data_queue()
4562 if (!sock_flag(sk, SOCK_DEAD)) in tcp_data_queue()
4563 sk->sk_data_ready(sk); in tcp_data_queue()
4569 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
4570 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4573 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4574 inet_csk_schedule_ack(sk); in tcp_data_queue()
4584 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4588 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", in tcp_data_queue()
4592 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4602 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
4605 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4615 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
4629 tcp_collapse(struct sock *sk, struct sk_buff_head *list, in tcp_collapse() argument
4646 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4691 skb_set_owner_r(nskb, sk); in tcp_collapse()
4708 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4721 static void tcp_collapse_ofo_queue(struct sock *sk) in tcp_collapse_ofo_queue() argument
4723 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue()
4747 tcp_collapse(sk, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
4768 static bool tcp_prune_ofo_queue(struct sock *sk) in tcp_prune_ofo_queue() argument
4770 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue()
4774 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
4784 sk_mem_reclaim(sk); in tcp_prune_ofo_queue()
4797 static int tcp_prune_queue(struct sock *sk) in tcp_prune_queue() argument
4799 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue()
4801 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
4803 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
4805 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in tcp_prune_queue()
4806 tcp_clamp_window(sk); in tcp_prune_queue()
4807 else if (sk_under_memory_pressure(sk)) in tcp_prune_queue()
4810 tcp_collapse_ofo_queue(sk); in tcp_prune_queue()
4811 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_prune_queue()
4812 tcp_collapse(sk, &sk->sk_receive_queue, in tcp_prune_queue()
4813 skb_peek(&sk->sk_receive_queue), in tcp_prune_queue()
4816 sk_mem_reclaim(sk); in tcp_prune_queue()
4818 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4824 tcp_prune_ofo_queue(sk); in tcp_prune_queue()
4826 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4833 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
4840 static bool tcp_should_expand_sndbuf(const struct sock *sk) in tcp_should_expand_sndbuf() argument
4842 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf()
4847 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in tcp_should_expand_sndbuf()
4851 if (sk_under_memory_pressure(sk)) in tcp_should_expand_sndbuf()
4855 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) in tcp_should_expand_sndbuf()
4871 static void tcp_new_space(struct sock *sk) in tcp_new_space() argument
4873 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space()
4875 if (tcp_should_expand_sndbuf(sk)) { in tcp_new_space()
4876 tcp_sndbuf_expand(sk); in tcp_new_space()
4880 sk->sk_write_space(sk); in tcp_new_space()
4883 static void tcp_check_space(struct sock *sk) in tcp_check_space() argument
4885 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { in tcp_check_space()
4886 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_check_space()
4889 if (sk->sk_socket && in tcp_check_space()
4890 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) in tcp_check_space()
4891 tcp_new_space(sk); in tcp_check_space()
4895 static inline void tcp_data_snd_check(struct sock *sk) in tcp_data_snd_check() argument
4897 tcp_push_pending_frames(sk); in tcp_data_snd_check()
4898 tcp_check_space(sk); in tcp_data_snd_check()
4904 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) in __tcp_ack_snd_check() argument
4906 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check()
4909 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
4913 __tcp_select_window(sk) >= tp->rcv_wnd) || in __tcp_ack_snd_check()
4915 tcp_in_quickack_mode(sk) || in __tcp_ack_snd_check()
4919 tcp_send_ack(sk); in __tcp_ack_snd_check()
4922 tcp_send_delayed_ack(sk); in __tcp_ack_snd_check()
4926 static inline void tcp_ack_snd_check(struct sock *sk) in tcp_ack_snd_check() argument
4928 if (!inet_csk_ack_scheduled(sk)) { in tcp_ack_snd_check()
4932 __tcp_ack_snd_check(sk, 1); in tcp_ack_snd_check()
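__tcp_ack_snd_check() chooses between an immediate and a delayed ACK: ACK now if more than one full segment is unacknowledged and the advertised window would actually advance, if the connection is in quick-ACK mode, or if out-of-order data is queued; otherwise arm the delayed-ACK timer. A boolean sketch of that decision with illustrative parameter names:

    #include <stdio.h>

    /* Mirrors the shape of the condition in __tcp_ack_snd_check(); returns
     * nonzero when the ACK should go out immediately rather than be delayed. */
    static int ack_now(unsigned int unacked_bytes, unsigned int rcv_mss,
                       int window_would_advance, int in_quickack_mode,
                       int have_ofo_data)
    {
        return (unacked_bytes > rcv_mss && window_would_advance) ||
               in_quickack_mode ||
               have_ofo_data;
    }

    int main(void)
    {
        /* exactly one full segment received, window not moving, no quickack:
         * this ACK gets delayed */
        printf("%s\n", ack_now(1460, 1460, 0, 0, 0) ? "ack now" : "delay");
        return 0;
    }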
4945 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) in tcp_check_urg() argument
4947 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg()
4976 sk_send_sigurg(sk); in tcp_check_urg()
4994 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
4995 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg()
4998 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
5011 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
5013 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg()
5017 tcp_check_urg(sk, th); in tcp_urg()
5030 if (!sock_flag(sk, SOCK_DEAD)) in tcp_urg()
5031 sk->sk_data_ready(sk); in tcp_urg()
5036 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) in tcp_copy_to_iovec() argument
5038 struct tcp_sock *tp = tcp_sk(sk); in tcp_copy_to_iovec()
5051 tcp_rcv_space_adjust(sk); in tcp_copy_to_iovec()
5058 static __sum16 __tcp_checksum_complete_user(struct sock *sk, in __tcp_checksum_complete_user() argument
5063 if (sock_owned_by_user(sk)) { in __tcp_checksum_complete_user()
5073 static inline bool tcp_checksum_complete_user(struct sock *sk, in tcp_checksum_complete_user() argument
5077 __tcp_checksum_complete_user(sk, skb); in tcp_checksum_complete_user()
5083 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5086 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming()
5090 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5092 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5093 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5096 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5113 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5116 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5130 tcp_reset(sk); in tcp_validate_incoming()
5132 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5144 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5145 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5146 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5180 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5183 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established()
5185 if (unlikely(!sk->sk_rx_dst)) in tcp_rcv_established()
5186 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5255 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5257 tcp_data_snd_check(sk); in tcp_rcv_established()
5260 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5270 sock_owned_by_user(sk)) { in tcp_rcv_established()
5273 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { in tcp_rcv_established()
5284 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5288 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); in tcp_rcv_established()
5293 if (tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5296 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5308 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5313 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5317 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5321 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5322 tcp_data_snd_check(sk); in tcp_rcv_established()
5323 if (!inet_csk_ack_scheduled(sk)) in tcp_rcv_established()
5327 __tcp_ack_snd_check(sk, 0); in tcp_rcv_established()
5331 sk->sk_data_ready(sk); in tcp_rcv_established()
5337 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5347 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5351 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5354 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5357 tcp_urg(sk, skb, th); in tcp_rcv_established()
5360 tcp_data_queue(sk, skb); in tcp_rcv_established()
5362 tcp_data_snd_check(sk); in tcp_rcv_established()
5363 tcp_ack_snd_check(sk); in tcp_rcv_established()
5367 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5368 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5375 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5377 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect()
5378 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect()
5380 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_finish_connect()
5383 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5384 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5388 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5390 tcp_init_metrics(sk); in tcp_finish_connect()
5392 tcp_init_congestion_control(sk); in tcp_finish_connect()
5399 tcp_init_buffer_space(sk); in tcp_finish_connect()
5401 if (sock_flag(sk, SOCK_KEEPOPEN)) in tcp_finish_connect()
5402 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5409 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_finish_connect()
5410 sk->sk_state_change(sk); in tcp_finish_connect()
5411 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_finish_connect()
5415 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, in tcp_rcv_fastopen_synack() argument
5418 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack()
5419 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5451 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); in tcp_rcv_fastopen_synack()
5454 tcp_for_write_queue_from(data, sk) { in tcp_rcv_fastopen_synack()
5455 if (data == tcp_send_head(sk) || in tcp_rcv_fastopen_synack()
5456 __tcp_retransmit_skb(sk, data)) in tcp_rcv_fastopen_synack()
5459 tcp_rearm_rto(sk); in tcp_rcv_fastopen_synack()
5460 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); in tcp_rcv_fastopen_synack()
5465 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); in tcp_rcv_fastopen_synack()
5469 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5472 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process()
5473 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process()
5497 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); in tcp_rcv_synsent_state_process()
5510 tcp_reset(sk); in tcp_rcv_synsent_state_process()
5534 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5565 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5566 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5567 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5576 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5579 tcp_rcv_fastopen_synack(sk, skb, &foc)) in tcp_rcv_synsent_state_process()
5582 if (sk->sk_write_pending || in tcp_rcv_synsent_state_process()
5592 inet_csk_schedule_ack(sk); in tcp_rcv_synsent_state_process()
5594 tcp_enter_quickack_mode(sk); in tcp_rcv_synsent_state_process()
5595 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_rcv_synsent_state_process()
5602 tcp_send_ack(sk); in tcp_rcv_synsent_state_process()
5629 tcp_set_state(sk, TCP_SYN_RECV); in tcp_rcv_synsent_state_process()
5653 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5654 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5655 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5657 tcp_send_synack(sk); in tcp_rcv_synsent_state_process()
5697 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_state_process() argument
5700 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process()
5701 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process()
5709 switch (sk->sk_state) { in tcp_rcv_state_process()
5723 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5749 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); in tcp_rcv_state_process()
5754 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5756 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5762 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_rcv_state_process()
5763 sk->sk_state != TCP_FIN_WAIT1); in tcp_rcv_state_process()
5765 if (!tcp_check_req(sk, skb, req, true)) in tcp_rcv_state_process()
5772 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5776 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5779 switch (sk->sk_state) { in tcp_rcv_state_process()
5790 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5794 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()
5795 tcp_init_congestion_control(sk); in tcp_rcv_state_process()
5797 tcp_mtup_init(sk); in tcp_rcv_state_process()
5799 tcp_init_buffer_space(sk); in tcp_rcv_state_process()
5802 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_rcv_state_process()
5803 sk->sk_state_change(sk); in tcp_rcv_state_process()
5809 if (sk->sk_socket) in tcp_rcv_state_process()
5810 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_rcv_state_process()
5815 tcp_synack_rtt_meas(sk, synack_stamp); in tcp_rcv_state_process()
5829 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5831 tcp_init_metrics(sk); in tcp_rcv_state_process()
5833 tcp_update_pacing_rate(sk); in tcp_rcv_state_process()
5838 tcp_initialize_rcv_mss(sk); in tcp_rcv_state_process()
5861 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5862 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5867 tcp_set_state(sk, TCP_FIN_WAIT2); in tcp_rcv_state_process()
5868 sk->sk_shutdown |= SEND_SHUTDOWN; in tcp_rcv_state_process()
5870 dst = __sk_dst_get(sk); in tcp_rcv_state_process()
5874 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_rcv_state_process()
5876 sk->sk_state_change(sk); in tcp_rcv_state_process()
5883 tcp_done(sk); in tcp_rcv_state_process()
5884 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5888 tmo = tcp_fin_time(sk); in tcp_rcv_state_process()
5890 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); in tcp_rcv_state_process()
5891 } else if (th->fin || sock_owned_by_user(sk)) { in tcp_rcv_state_process()
5898 inet_csk_reset_keepalive_timer(sk, tmo); in tcp_rcv_state_process()
5900 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_rcv_state_process()
5908 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_rcv_state_process()
5915 tcp_update_metrics(sk); in tcp_rcv_state_process()
5916 tcp_done(sk); in tcp_rcv_state_process()
5923 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5926 switch (sk->sk_state) { in tcp_rcv_state_process()
5938 if (sk->sk_shutdown & RCV_SHUTDOWN) { in tcp_rcv_state_process()
5941 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5942 tcp_reset(sk); in tcp_rcv_state_process()
5948 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
5954 if (sk->sk_state != TCP_CLOSE) { in tcp_rcv_state_process()
5955 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5956 tcp_ack_snd_check(sk); in tcp_rcv_state_process()
6015 struct sk_buff *skb, const struct sock *sk) in tcp_openreq_init() argument
6035 ireq->ir_mark = inet_request_mark(sk, skb); in tcp_openreq_init()
6061 static bool tcp_syn_flood_action(struct sock *sk, in tcp_syn_flood_action() argument
6073 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); in tcp_syn_flood_action()
6076 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); in tcp_syn_flood_action()
6078 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; in tcp_syn_flood_action()
6089 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
6093 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request()
6107 inet_csk_reqsk_queue_is_full(sk)) && !isn) { in tcp_conn_request()
6108 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
6119 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { in tcp_conn_request()
6120 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
6124 req = inet_reqsk_alloc(rsk_ops, sk); in tcp_conn_request()
6139 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
6142 inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; in tcp_conn_request()
6144 af_ops->init_req(req, sk, skb); in tcp_conn_request()
6146 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
6162 dst = af_ops->route_req(sk, &fl, req, &strict); in tcp_conn_request()
6167 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); in tcp_conn_request()
6173 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < in tcp_conn_request()
6192 dst = af_ops->route_req(sk, &fl, req, NULL); in tcp_conn_request()
6197 tcp_ecn_create_request(req, skb, sk, dst); in tcp_conn_request()
6200 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6207 tcp_openreq_init_rwin(req, sk, dst); in tcp_conn_request()
6209 tcp_try_fastopen(sk, skb, req, &foc, dst); in tcp_conn_request()
6210 err = af_ops->send_synack(sk, dst, &fl, req, in tcp_conn_request()
6217 af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); in tcp_conn_request()
6227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_conn_request()