net/ipv4/tcp_timer.c


DEFINITIONS

This source file includes the following definitions:
  1. tcp_clamp_rto_to_user_timeout
  2. tcp_write_err
  3. tcp_out_of_resources
  4. tcp_orphan_retries
  5. tcp_mtu_probing
  6. tcp_model_timeout
  7. retransmits_timed_out
  8. tcp_write_timeout
  9. tcp_delack_timer_handler
  10. tcp_delack_timer
  11. tcp_probe_timer
  12. tcp_fastopen_synack_timer
  13. tcp_retransmit_timer
  14. tcp_write_timer_handler
  15. tcp_write_timer
  16. tcp_syn_ack_timeout
  17. tcp_set_keepalive
  18. tcp_keepalive_timer
  19. tcp_compressed_ack_kick
  20. tcp_init_xmit_timers

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   4  *              operating system.  INET is implemented using the  BSD Socket
   5  *              interface as the means of communication with the user level.
   6  *
   7  *              Implementation of the Transmission Control Protocol(TCP).
   8  *
   9  * Authors:     Ross Biro
  10  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
  12  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
  13  *              Florian La Roche, <flla@stud.uni-sb.de>
  14  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  15  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
  16  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  17  *              Matthew Dillon, <dillon@apollo.west.oic.com>
  18  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  19  *              Jorge Cwik, <jorge@laser.satlink.net>
  20  */
  21 
  22 #include <linux/module.h>
  23 #include <linux/gfp.h>
  24 #include <net/tcp.h>
  25 
  26 static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
  27 {
  28         struct inet_connection_sock *icsk = inet_csk(sk);
  29         u32 elapsed, start_ts;
  30         s32 remaining;
  31 
  32         start_ts = tcp_sk(sk)->retrans_stamp;
  33         if (!icsk->icsk_user_timeout)
  34                 return icsk->icsk_rto;
  35         elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
  36         remaining = icsk->icsk_user_timeout - elapsed;
  37         if (remaining <= 0)
  38                 return 1; /* user timeout has passed; fire ASAP */
  39 
  40         return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
  41 }
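     /* Illustrative note (not part of the original source): icsk_user_timeout
      * is set from user space via the TCP_USER_TIMEOUT socket option, in
      * milliseconds. A minimal sketch of how an application might bound
      * retransmissions to 30 seconds (hypothetical fd, error handling omitted):
      *
      *     unsigned int tmo_ms = 30000;
      *     setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
      *
      * With this set, the clamp above ensures the retransmit timer never fires
      * later than the time remaining until the user timeout expires.
      */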
  42 
  43 /**
  44  *  tcp_write_err() - close socket and save error info
  45  *  @sk:  The socket the error has appeared on.
  46  *
  47  *  Returns: Nothing (void)
  48  */
  49 
  50 static void tcp_write_err(struct sock *sk)
  51 {
  52         sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
  53         sk->sk_error_report(sk);
  54 
  55         tcp_write_queue_purge(sk);
  56         tcp_done(sk);
  57         __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
  58 }
  59 
  60 /**
  61  *  tcp_out_of_resources() - Close socket if out of resources
  62  *  @sk:        pointer to current socket
  63  *  @do_reset:  send a last packet with reset flag
  64  *
  65  *  Do not allow orphaned sockets to eat all our resources.
  66  *  This is direct violation of TCP specs, but it is required
  67  *  to prevent DoS attacks. It is called when a retransmission timeout
  68  *  or zero probe timeout occurs on orphaned socket.
  69  *
  70  *  Also close if our net namespace is exiting; in that case there is no
  71  *  hope of ever communicating again since all netns interfaces are already
  72  *  down (or about to be down), and we need to release our dst references,
  73  *  which have been moved to the netns loopback interface, so the namespace
  74  *  can finish exiting.  This condition is only possible if we are a kernel
  75  *  socket, as those do not hold references to the namespace.
  76  *
  77  *  The criteria are still not confirmed experimentally and may change.
  78  *  We kill the socket if:
  79  *  1. The number of orphaned sockets exceeds an administratively
  80  *     configured limit.
  81  *  2. We are under strong memory pressure.
  82  *  3. Our net namespace is exiting.
  83  */
  84 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
  85 {
  86         struct tcp_sock *tp = tcp_sk(sk);
  87         int shift = 0;
  88 
  89         /* If the peer does not open its window for a long time, or did not
  90          * transmit anything for a long time, penalize it. */
  91         if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
  92                 shift++;
  93 
  94         /* If some dubious ICMP arrived, penalize even more. */
  95         if (sk->sk_err_soft)
  96                 shift++;
  97 
  98         if (tcp_check_oom(sk, shift)) {
  99                 /* Catch exceptional cases, when connection requires reset.
 100                  *      1. Last segment was sent recently. */
 101                 if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
 102                     /*  2. Window is closed. */
 103                     (!tp->snd_wnd && !tp->packets_out))
 104                         do_reset = true;
 105                 if (do_reset)
 106                         tcp_send_active_reset(sk, GFP_ATOMIC);
 107                 tcp_done(sk);
 108                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 109                 return 1;
 110         }
 111 
 112         if (!check_net(sock_net(sk))) {
 113                 /* Not possible to send reset; just close */
 114                 tcp_done(sk);
 115                 return 1;
 116         }
 117 
 118         return 0;
 119 }
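     /* Illustrative note (not part of the original source): the
      * "administratively configured limit" checked via tcp_check_oom()
      * corresponds to the net.ipv4.tcp_max_orphans sysctl, and memory pressure
      * is judged against the net.ipv4.tcp_mem thresholds. Hitting either can
      * abort an orphaned socket here and bump the TCPAbortOnMemory counter
      * shown in /proc/net/netstat.
      */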
 120 
 121 /**
 122  *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 123  *  @sk:    Pointer to the current socket.
 124  *  @alive: bool, socket alive state
 125  */
 126 static int tcp_orphan_retries(struct sock *sk, bool alive)
 127 {
 128         int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
 129 
 130         /* We know from an ICMP that something is wrong. */
 131         if (sk->sk_err_soft && !alive)
 132                 retries = 0;
 133 
 134         /* However, if the socket sent something recently, select some safe
 135          * number of retries. 8 corresponds to >100 seconds with a minimal
 136          * RTO of 200 msec. */
 137         if (retries == 0 && alive)
 138                 retries = 8;
 139         return retries;
 140 }
 141 
 142 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 143 {
 144         const struct net *net = sock_net(sk);
 145         int mss;
 146 
 147         /* Black hole detection */
 148         if (!net->ipv4.sysctl_tcp_mtu_probing)
 149                 return;
 150 
 151         if (!icsk->icsk_mtup.enabled) {
 152                 icsk->icsk_mtup.enabled = 1;
 153                 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
 154         } else {
 155                 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
 156                 mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
 157                 mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
 158                 mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
 159                 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
 160         }
 161         tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 162 }
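     /* Illustrative arithmetic (not part of the original source), assuming
      * default sysctls (tcp_base_mss = 1024, tcp_mtu_probe_floor = 48,
      * tcp_min_snd_mss = 48): if the current search_low corresponds to an MSS
      * of 1400, halving gives 700, min(1024, 700) = 700, and the floors leave
      * it at 700, so the next probe uses the MTU derived from a 700-byte MSS.
      * Repeated timeouts keep shrinking the MSS toward the floor, which is the
      * black-hole recovery behaviour.
      */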
 163 
 164 static unsigned int tcp_model_timeout(struct sock *sk,
 165                                       unsigned int boundary,
 166                                       unsigned int rto_base)
 167 {
 168         unsigned int linear_backoff_thresh, timeout;
 169 
 170         linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
 171         if (boundary <= linear_backoff_thresh)
 172                 timeout = ((2 << boundary) - 1) * rto_base;
 173         else
 174                 timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 175                         (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 176         return jiffies_to_msecs(timeout);
 177 }
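     /* Illustrative arithmetic (not part of the original source), with
      * TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s, so
      * linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9:
      *
      *   boundary = 3  -> ((2 << 3) - 1) * 200 ms             = 3 s
      *   boundary = 15 -> ((2 << 9) - 1) * 200 ms + 6 * 120 s = 204.6 s + 720 s
      *                                                        = 924.6 s
      *
      * The second case is the familiar ~15.4-minute give-up time for the
      * default net.ipv4.tcp_retries2 = 15.
      */
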
 178 /**
 179  *  retransmits_timed_out() - returns true if this connection has timed out
 180  *  @sk:       The current socket
 181  *  @boundary: max number of retransmissions
 182  *  @timeout:  A custom timeout value.
 183  *             If set to 0, the default timeout is calculated and used,
 184  *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 185  *
 186  * The default "timeout" value this function can calculate and use
 187  * is equivalent to the timeout of a TCP Connection
 188  * after "boundary" unsuccessful, exponentially backed-off
 189  * retransmissions with an initial RTO of TCP_RTO_MIN.
 190  */
 191 static bool retransmits_timed_out(struct sock *sk,
 192                                   unsigned int boundary,
 193                                   unsigned int timeout)
 194 {
 195         unsigned int start_ts;
 196 
 197         if (!inet_csk(sk)->icsk_retransmits)
 198                 return false;
 199 
 200         start_ts = tcp_sk(sk)->retrans_stamp;
 201         if (likely(timeout == 0)) {
 202                 unsigned int rto_base = TCP_RTO_MIN;
 203 
 204                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 205                         rto_base = tcp_timeout_init(sk);
 206                 timeout = tcp_model_timeout(sk, boundary, rto_base);
 207         }
 208 
 209         return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
 210 }
 211 
 212 /* A write timeout has occurred. Process the after effects. */
 213 static int tcp_write_timeout(struct sock *sk)
 214 {
 215         struct inet_connection_sock *icsk = inet_csk(sk);
 216         struct tcp_sock *tp = tcp_sk(sk);
 217         struct net *net = sock_net(sk);
 218         bool expired = false, do_reset;
 219         int retry_until;
 220 
 221         if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 222                 if (icsk->icsk_retransmits) {
 223                         dst_negative_advice(sk);
 224                 } else {
 225                         sk_rethink_txhash(sk);
 226                 }
 227                 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 228                 expired = icsk->icsk_retransmits >= retry_until;
 229         } else {
 230                 if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
 231                         /* Black hole detection */
 232                         tcp_mtu_probing(icsk, sk);
 233 
 234                         dst_negative_advice(sk);
 235                 } else {
 236                         sk_rethink_txhash(sk);
 237                 }
 238 
 239                 retry_until = net->ipv4.sysctl_tcp_retries2;
 240                 if (sock_flag(sk, SOCK_DEAD)) {
 241                         const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 242 
 243                         retry_until = tcp_orphan_retries(sk, alive);
 244                         do_reset = alive ||
 245                                 !retransmits_timed_out(sk, retry_until, 0);
 246 
 247                         if (tcp_out_of_resources(sk, do_reset))
 248                                 return 1;
 249                 }
 250         }
 251         if (!expired)
 252                 expired = retransmits_timed_out(sk, retry_until,
 253                                                 icsk->icsk_user_timeout);
 254         tcp_fastopen_active_detect_blackhole(sk, expired);
 255 
 256         if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
 257                 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
 258                                   icsk->icsk_retransmits,
 259                                   icsk->icsk_rto, (int)expired);
 260 
 261         if (expired) {
 262                 /* Has it gone just too far? */
 263                 tcp_write_err(sk);
 264                 return 1;
 265         }
 266 
 267         return 0;
 268 }
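     /* Illustrative note (not part of the original source), assuming default
      * sysctls: net.ipv4.tcp_retries1 = 3 only triggers the "soft" reactions
      * above (MTU probing and dst_negative_advice()), while
      * net.ipv4.tcp_retries2 = 15, the orphan limits or TCP_USER_TIMEOUT decide
      * when the connection is finally aborted with ETIMEDOUT via tcp_write_err().
      */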
 269 
 270 /* Called with BH disabled */
 271 void tcp_delack_timer_handler(struct sock *sk)
 272 {
 273         struct inet_connection_sock *icsk = inet_csk(sk);
 274 
 275         sk_mem_reclaim_partial(sk);
 276 
 277         if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
 278             !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 279                 goto out;
 280 
 281         if (time_after(icsk->icsk_ack.timeout, jiffies)) {
 282                 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
 283                 goto out;
 284         }
 285         icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
 286 
 287         if (inet_csk_ack_scheduled(sk)) {
 288                 if (!inet_csk_in_pingpong_mode(sk)) {
 289                         /* Delayed ACK missed: inflate ATO. */
 290                         icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
 291                 } else {
 292                         /* Delayed ACK missed: leave pingpong mode and
 293                          * deflate ATO.
 294                          */
 295                         inet_csk_exit_pingpong_mode(sk);
 296                         icsk->icsk_ack.ato      = TCP_ATO_MIN;
 297                 }
 298                 tcp_mstamp_refresh(tcp_sk(sk));
 299                 tcp_send_ack(sk);
 300                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 301         }
 302 
 303 out:
 304         if (tcp_under_memory_pressure(sk))
 305                 sk_mem_reclaim(sk);
 306 }
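     /* Illustrative note (not part of the original source): outside pingpong
      * mode, each missed delayed ACK roughly doubles the ACK timeout (ato),
      * e.g. 40 ms -> 80 ms -> 160 ms, but never beyond the current RTO; in
      * pingpong mode a miss instead leaves pingpong mode and resets ato to
      * TCP_ATO_MIN.
      */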
 307 
 308 
 309 /**
 310  *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 311  *  @t:  Pointer to the timer; the owning socket is recovered via from_timer().
 312  *
 313  *  This function gets (indirectly) called when the delayed ACK timer of this
 314  *  socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 315  *
 316  *  Returns: Nothing (void)
 317  */
 318 static void tcp_delack_timer(struct timer_list *t)
 319 {
 320         struct inet_connection_sock *icsk =
 321                         from_timer(icsk, t, icsk_delack_timer);
 322         struct sock *sk = &icsk->icsk_inet.sk;
 323 
 324         bh_lock_sock(sk);
 325         if (!sock_owned_by_user(sk)) {
 326                 tcp_delack_timer_handler(sk);
 327         } else {
 328                 icsk->icsk_ack.blocked = 1;
 329                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 330                 /* delegate our work to tcp_release_cb() */
 331                 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 332                         sock_hold(sk);
 333         }
 334         bh_unlock_sock(sk);
 335         sock_put(sk);
 336 }
 337 
 338 static void tcp_probe_timer(struct sock *sk)
 339 {
 340         struct inet_connection_sock *icsk = inet_csk(sk);
 341         struct sk_buff *skb = tcp_send_head(sk);
 342         struct tcp_sock *tp = tcp_sk(sk);
 343         int max_probes;
 344 
 345         if (tp->packets_out || !skb) {
 346                 icsk->icsk_probes_out = 0;
 347                 return;
 348         }
 349 
 350         /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
 351          * long as the receiver continues to respond to probes. We support this
 352          * by default and reset icsk_probes_out with incoming ACKs. But if the
 353          * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
 354          * kill the socket when the retry count and the elapsed time exceed the
 355          * corresponding system limits. We also implement a similar policy when
 356          * we use the RTO to probe the window in tcp_retransmit_timer().
 357          */
 358         if (icsk->icsk_user_timeout) {
 359                 u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
 360                                                 tcp_probe0_base(sk));
 361 
 362                 if (elapsed >= icsk->icsk_user_timeout)
 363                         goto abort;
 364         }
 365 
 366         max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
 367         if (sock_flag(sk, SOCK_DEAD)) {
 368                 const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 369 
 370                 max_probes = tcp_orphan_retries(sk, alive);
 371                 if (!alive && icsk->icsk_backoff >= max_probes)
 372                         goto abort;
 373                 if (tcp_out_of_resources(sk, true))
 374                         return;
 375         }
 376 
 377         if (icsk->icsk_probes_out >= max_probes) {
 378 abort:          tcp_write_err(sk);
 379         } else {
 380                 /* Only send another probe if we didn't close things up. */
 381                 tcp_send_probe0(sk);
 382         }
 383 }
 384 
 385 /*
 386  *      Timer for Fast Open socket to retransmit SYNACK. Note that the
 387  *      sk here is the child socket, not the parent (listener) socket.
 388  */
 389 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 390 {
 391         struct inet_connection_sock *icsk = inet_csk(sk);
 392         int max_retries = icsk->icsk_syn_retries ? :
 393             sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
 394         struct tcp_sock *tp = tcp_sk(sk);
 395 
 396         req->rsk_ops->syn_ack_timeout(req);
 397 
 398         if (req->num_timeout >= max_retries) {
 399                 tcp_write_err(sk);
 400                 return;
 401         }
 402         /* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
 403         if (icsk->icsk_retransmits == 1)
 404                 tcp_enter_loss(sk);
 405         /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
 406          * returned from rtx_syn_ack() to make it more persistent like
 407          * regular retransmit because if the child socket has been accepted
 408          * it's not good to give up too easily.
 409          */
 410         inet_rtx_syn_ack(sk, req);
 411         req->num_timeout++;
 412         icsk->icsk_retransmits++;
 413         if (!tp->retrans_stamp)
 414                 tp->retrans_stamp = tcp_time_stamp(tp);
 415         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 416                           TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 417 }
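     /* Illustrative arithmetic (not part of the original source): with
      * TCP_TIMEOUT_INIT = 1 s, the rearm above backs off as
      * TCP_TIMEOUT_INIT << num_timeout, i.e. 2 s, 4 s, 8 s, ..., clamped by
      * TCP_RTO_MAX (120 s), until num_timeout reaches max_retries
      * (sysctl_tcp_synack_retries + 1, i.e. 6 with the default of 5).
      */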
 418 
 419 
 420 /**
 421  *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 422  *  @sk:  Pointer to the current socket.
 423  *
 424  *  This function gets called when the kernel timer for a TCP packet
 425  *  of this socket expires.
 426  *
 427  *  It handles retransmission, timer adjustment and other necessary measures.
 428  *
 429  *  Returns: Nothing (void)
 430  */
 431 void tcp_retransmit_timer(struct sock *sk)
 432 {
 433         struct tcp_sock *tp = tcp_sk(sk);
 434         struct net *net = sock_net(sk);
 435         struct inet_connection_sock *icsk = inet_csk(sk);
 436         struct request_sock *req;
 437 
 438         req = rcu_dereference_protected(tp->fastopen_rsk,
 439                                         lockdep_sock_is_held(sk));
 440         if (req) {
 441                 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 442                              sk->sk_state != TCP_FIN_WAIT1);
 443                 tcp_fastopen_synack_timer(sk, req);
 444                 /* Before we receive ACK to our SYN-ACK don't retransmit
 445                  * anything else (e.g., data or FIN segments).
 446                  */
 447                 return;
 448         }
 449         if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
 450                 return;
 451 
 452         tp->tlp_high_seq = 0;
 453 
 454         if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
 455             !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
 456                 /* Receiver dastardly shrinks window. Our retransmits
 457                  * become zero probes, but we should not time out this
 458                  * connection. If the socket is an orphan, time it out;
 459                  * we cannot allow such beasts to hang infinitely.
 460                  */
 461                 struct inet_sock *inet = inet_sk(sk);
 462                 if (sk->sk_family == AF_INET) {
 463                         net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
 464                                             &inet->inet_daddr,
 465                                             ntohs(inet->inet_dport),
 466                                             inet->inet_num,
 467                                             tp->snd_una, tp->snd_nxt);
 468                 }
 469 #if IS_ENABLED(CONFIG_IPV6)
 470                 else if (sk->sk_family == AF_INET6) {
 471                         net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
 472                                             &sk->sk_v6_daddr,
 473                                             ntohs(inet->inet_dport),
 474                                             inet->inet_num,
 475                                             tp->snd_una, tp->snd_nxt);
 476                 }
 477 #endif
 478                 if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
 479                         tcp_write_err(sk);
 480                         goto out;
 481                 }
 482                 tcp_enter_loss(sk);
 483                 tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
 484                 __sk_dst_reset(sk);
 485                 goto out_reset_timer;
 486         }
 487 
 488         __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
 489         if (tcp_write_timeout(sk))
 490                 goto out;
 491 
 492         if (icsk->icsk_retransmits == 0) {
 493                 int mib_idx = 0;
 494 
 495                 if (icsk->icsk_ca_state == TCP_CA_Recovery) {
 496                         if (tcp_is_sack(tp))
 497                                 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
 498                         else
 499                                 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
 500                 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
 501                         mib_idx = LINUX_MIB_TCPLOSSFAILURES;
 502                 } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
 503                            tp->sacked_out) {
 504                         if (tcp_is_sack(tp))
 505                                 mib_idx = LINUX_MIB_TCPSACKFAILURES;
 506                         else
 507                                 mib_idx = LINUX_MIB_TCPRENOFAILURES;
 508                 }
 509                 if (mib_idx)
 510                         __NET_INC_STATS(sock_net(sk), mib_idx);
 511         }
 512 
 513         tcp_enter_loss(sk);
 514 
 515         icsk->icsk_retransmits++;
 516         if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
 517                 /* Retransmission failed because of local congestion;
 518                  * let senders fight for local resources conservatively.
 519                  */
 520                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 521                                           TCP_RESOURCE_PROBE_INTERVAL,
 522                                           TCP_RTO_MAX);
 523                 goto out;
 524         }
 525 
 526         /* Increase the timeout each time we retransmit.  Note that
 527          * we do not increase the rtt estimate.  rto is initialized
 528          * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
 529          * that doubling rto each time is the least we can get away with.
 530          * In KA9Q, Karn uses this for the first few times, and then
 531          * goes to quadratic.  netBSD doubles, but only goes up to *64,
 532          * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
 533          * defined in the protocol as the maximum possible RTT.  I guess
 534          * we'll have to use something other than TCP to talk to the
 535          * University of Mars.
 536          *
 537          * PAWS allows us longer timeouts and large windows, so once
 538          * implemented ftp to mars will work nicely. We will have to fix
 539          * the 120 second clamps though!
 540          */
 541         icsk->icsk_backoff++;
 542 
 543 out_reset_timer:
 544         /* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
 545          * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as it
 546          * might have been increased if the stream oscillates between thin and
 547          * thick; the old value might already be too high compared to the value
 548          * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
 549          * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
 550          * exponential backoff behaviour, to avoid hammering linear-timeout
 551          * retransmissions into a black hole.
 552          */
 553         if (sk->sk_state == TCP_ESTABLISHED &&
 554             (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
 555             tcp_stream_is_thin(tp) &&
 556             icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 557                 icsk->icsk_backoff = 0;
 558                 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 559         } else {
 560                 /* Use normal (exponential) backoff */
 561                 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 562         }
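             /* Illustrative note (not part of the original source): with the
              * exponential branch, an RTO of 1 s backs off to 2 s, 4 s, 8 s, ...,
              * clamped at TCP_RTO_MAX (120 s); the thin-stream branch instead
              * keeps a freshly computed, un-backed-off RTO for the first
              * TCP_THIN_LINEAR_RETRIES (6) retransmissions.
              */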
 563         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 564                                   tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
 565         if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
 566                 __sk_dst_reset(sk);
 567 
 568 out:;
 569 }
 570 
 571 /* Called with bottom-half processing disabled.
 572    Called by tcp_write_timer() */
 573 void tcp_write_timer_handler(struct sock *sk)
 574 {
 575         struct inet_connection_sock *icsk = inet_csk(sk);
 576         int event;
 577 
 578         if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
 579             !icsk->icsk_pending)
 580                 goto out;
 581 
 582         if (time_after(icsk->icsk_timeout, jiffies)) {
 583                 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
 584                 goto out;
 585         }
 586 
 587         tcp_mstamp_refresh(tcp_sk(sk));
 588         event = icsk->icsk_pending;
 589 
 590         switch (event) {
 591         case ICSK_TIME_REO_TIMEOUT:
 592                 tcp_rack_reo_timeout(sk);
 593                 break;
 594         case ICSK_TIME_LOSS_PROBE:
 595                 tcp_send_loss_probe(sk);
 596                 break;
 597         case ICSK_TIME_RETRANS:
 598                 icsk->icsk_pending = 0;
 599                 tcp_retransmit_timer(sk);
 600                 break;
 601         case ICSK_TIME_PROBE0:
 602                 icsk->icsk_pending = 0;
 603                 tcp_probe_timer(sk);
 604                 break;
 605         }
 606 
 607 out:
 608         sk_mem_reclaim(sk);
 609 }
 610 
 611 static void tcp_write_timer(struct timer_list *t)
 612 {
 613         struct inet_connection_sock *icsk =
 614                         from_timer(icsk, t, icsk_retransmit_timer);
 615         struct sock *sk = &icsk->icsk_inet.sk;
 616 
 617         bh_lock_sock(sk);
 618         if (!sock_owned_by_user(sk)) {
 619                 tcp_write_timer_handler(sk);
 620         } else {
 621                 /* delegate our work to tcp_release_cb() */
 622                 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 623                         sock_hold(sk);
 624         }
 625         bh_unlock_sock(sk);
 626         sock_put(sk);
 627 }
 628 
 629 void tcp_syn_ack_timeout(const struct request_sock *req)
 630 {
 631         struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
 632 
 633         __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 634 }
 635 EXPORT_SYMBOL(tcp_syn_ack_timeout);
 636 
 637 void tcp_set_keepalive(struct sock *sk, int val)
 638 {
 639         if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
 640                 return;
 641 
 642         if (val && !sock_flag(sk, SOCK_KEEPOPEN))
 643                 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
 644         else if (!val)
 645                 inet_csk_delete_keepalive_timer(sk);
 646 }
 647 EXPORT_SYMBOL_GPL(tcp_set_keepalive);
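     /* Illustrative usage (not part of the original source): tcp_set_keepalive()
      * is reached when an application enables SO_KEEPALIVE; per-socket timing
      * can then be tuned with TCP_KEEPIDLE / TCP_KEEPINTVL / TCP_KEEPCNT.
      * A minimal userspace sketch (hypothetical fd, error handling omitted):
      *
      *     int on = 1, idle = 600, intvl = 60, cnt = 5;
      *     setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
      *     setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
      *     setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
      *     setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
      */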
 648 
 649 
 650 static void tcp_keepalive_timer (struct timer_list *t)
 651 {
 652         struct sock *sk = from_timer(sk, t, sk_timer);
 653         struct inet_connection_sock *icsk = inet_csk(sk);
 654         struct tcp_sock *tp = tcp_sk(sk);
 655         u32 elapsed;
 656 
 657         /* Only process if socket is not in use. */
 658         bh_lock_sock(sk);
 659         if (sock_owned_by_user(sk)) {
 660                 /* Try again later. */
 661                 inet_csk_reset_keepalive_timer (sk, HZ/20);
 662                 goto out;
 663         }
 664 
 665         if (sk->sk_state == TCP_LISTEN) {
 666                 pr_err("Hmm... keepalive on a LISTEN ???\n");
 667                 goto out;
 668         }
 669 
 670         tcp_mstamp_refresh(tp);
 671         if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
 672                 if (tp->linger2 >= 0) {
 673                         const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
 674 
 675                         if (tmo > 0) {
 676                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 677                                 goto out;
 678                         }
 679                 }
 680                 tcp_send_active_reset(sk, GFP_ATOMIC);
 681                 goto death;
 682         }
 683 
 684         if (!sock_flag(sk, SOCK_KEEPOPEN) ||
 685             ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 686                 goto out;
 687 
 688         elapsed = keepalive_time_when(tp);
 689 
 690         /* It is alive without keepalive 8) */
 691         if (tp->packets_out || !tcp_write_queue_empty(sk))
 692                 goto resched;
 693 
 694         elapsed = keepalive_time_elapsed(tp);
 695 
 696         if (elapsed >= keepalive_time_when(tp)) {
 697                 /* If the TCP_USER_TIMEOUT option is enabled, use that
 698                  * to determine when to time out instead.
 699                  */
 700                 if ((icsk->icsk_user_timeout != 0 &&
 701                     elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
 702                     icsk->icsk_probes_out > 0) ||
 703                     (icsk->icsk_user_timeout == 0 &&
 704                     icsk->icsk_probes_out >= keepalive_probes(tp))) {
 705                         tcp_send_active_reset(sk, GFP_ATOMIC);
 706                         tcp_write_err(sk);
 707                         goto out;
 708                 }
 709                 if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
 710                         icsk->icsk_probes_out++;
 711                         elapsed = keepalive_intvl_when(tp);
 712                 } else {
 713                         /* If keepalive was lost due to local congestion,
 714                          * try harder.
 715                          */
 716                         elapsed = TCP_RESOURCE_PROBE_INTERVAL;
 717                 }
 718         } else {
 719                 /* The timer will fire at tp->rcv_tstamp + keepalive_time_when(tp) */
 720                 elapsed = keepalive_time_when(tp) - elapsed;
 721         }
 722 
 723         sk_mem_reclaim(sk);
 724 
 725 resched:
 726         inet_csk_reset_keepalive_timer (sk, elapsed);
 727         goto out;
 728 
 729 death:
 730         tcp_done(sk);
 731 
 732 out:
 733         bh_unlock_sock(sk);
 734         sock_put(sk);
 735 }
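     /* Illustrative arithmetic (not part of the original source), with the
      * default sysctls tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s
      * and tcp_keepalive_probes = 9: an idle connection whose peer has silently
      * vanished sends its first probe after 2 hours and is torn down roughly
      * 9 * 75 s = 675 s later, i.e. about 7875 s after the last activity.
      */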
 736 
 737 static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
 738 {
 739         struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
 740         struct sock *sk = (struct sock *)tp;
 741 
 742         bh_lock_sock(sk);
 743         if (!sock_owned_by_user(sk)) {
 744                 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
 745                         tcp_send_ack(sk);
 746         } else {
 747                 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
 748                                       &sk->sk_tsq_flags))
 749                         sock_hold(sk);
 750         }
 751         bh_unlock_sock(sk);
 752 
 753         sock_put(sk);
 754 
 755         return HRTIMER_NORESTART;
 756 }
 757 
 758 void tcp_init_xmit_timers(struct sock *sk)
 759 {
 760         inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 761                                   &tcp_keepalive_timer);
 762         hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
 763                      HRTIMER_MODE_ABS_PINNED_SOFT);
 764         tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
 765 
 766         hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
 767                      HRTIMER_MODE_REL_PINNED_SOFT);
 768         tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
 769 }
