root/include/net/inet_connection_sock.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. inet_csk
  2. inet_csk_ca
  3. inet_csk_schedule_ack
  4. inet_csk_ack_scheduled
  5. inet_csk_delack_init
  6. inet_csk_clear_xmit_timer
  7. inet_csk_reset_xmit_timer
  8. inet_csk_rto_backoff
  9. inet_csk_reqsk_queue_added
  10. inet_csk_reqsk_queue_len
  11. inet_csk_reqsk_queue_is_full
  12. inet_csk_listen_poll
  13. inet_csk_enter_pingpong_mode
  14. inet_csk_exit_pingpong_mode
  15. inet_csk_in_pingpong_mode
  16. inet_csk_inc_pingpong_cnt

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  * NET          Generic infrastructure for INET connection oriented protocols.
   4  *
   5  *              Definitions for inet_connection_sock 
   6  *
   7  * Authors:     Many people, see the TCP sources
   8  *
   9  *              From code originally in TCP
  10  */
  11 #ifndef _INET_CONNECTION_SOCK_H
  12 #define _INET_CONNECTION_SOCK_H
  13 
  14 #include <linux/compiler.h>
  15 #include <linux/string.h>
  16 #include <linux/timer.h>
  17 #include <linux/poll.h>
  18 #include <linux/kernel.h>
  19 
  20 #include <net/inet_sock.h>
  21 #include <net/request_sock.h>
  22 
  23 /* Cancel timers, when they are not required. */
  24 #undef INET_CSK_CLEAR_TIMERS
  25 
  26 struct inet_bind_bucket;
  27 struct tcp_congestion_ops;
  28 
  29 /*
  30  * Pointers to address related TCP functions
  31  * (i.e. things that depend on the address family)
  32  */
struct inet_connection_sock_af_ops {
        /* Transmit-side hooks. */
        int         (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
        void        (*send_check)(struct sock *sk, struct sk_buff *skb);
        int         (*rebuild_header)(struct sock *sk);
        void        (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
        /* Connection establishment: handle an incoming connection request
         * and create the child socket once the handshake completes.
         */
        int         (*conn_request)(struct sock *sk, struct sk_buff *skb);
        struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req,
                                      struct dst_entry *dst,
                                      struct request_sock *req_unhash,
                                      bool *own_req);
        /* Family-dependent sizes (network header, fragmentation header,
         * and the family's sockaddr structure).
         */
        u16         net_header_len;
        u16         net_frag_header_len;
        u16         sockaddr_len;
        /* Per-family socket-option handlers. */
        int         (*setsockopt)(struct sock *sk, int level, int optname,
                                  char __user *optval, unsigned int optlen);
        int         (*getsockopt)(struct sock *sk, int level, int optname,
                                  char __user *optval, int __user *optlen);
#ifdef CONFIG_COMPAT
        /* 32-bit compat variants, built only under CONFIG_COMPAT. */
        int         (*compat_setsockopt)(struct sock *sk,
                                int level, int optname,
                                char __user *optval, unsigned int optlen);
        int         (*compat_getsockopt)(struct sock *sk,
                                int level, int optname,
                                char __user *optval, int __user *optlen);
#endif
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
        /* Called when a smaller path MTU is learned for this socket. */
        void        (*mtu_reduced)(struct sock *sk);
};
  62 
  63 /** inet_connection_sock - INET connection oriented sock
  64  *
  65  * @icsk_accept_queue:     FIFO of established children
  66  * @icsk_bind_hash:        Bind node
  67  * @icsk_timeout:          Timeout
  68  * @icsk_retransmit_timer: Resend (no ack)
  69  * @icsk_rto:              Retransmit timeout
   70  * @icsk_pmtu_cookie:      Last pmtu seen by socket
   71  * @icsk_ca_ops:           Pluggable congestion control hook
   72  * @icsk_af_ops:           Operations which are AF_INET{4,6} specific
   73  * @icsk_ulp_ops:          Pluggable ULP control hook
   74  * @icsk_ulp_data:         ULP private data
   75  * @icsk_clean_acked:      Clean acked data hook
   76  * @icsk_listen_portaddr_node:  hash to the portaddr listener hashtable
   77  * @icsk_ca_state:         Congestion control state
   78  * @icsk_retransmits:      Number of unrecovered [RTO] timeouts
   79  * @icsk_pending:          Scheduled timer event
   80  * @icsk_backoff:          Backoff
   81  * @icsk_syn_retries:      Number of allowed SYN (or equivalent) retries
   82  * @icsk_probes_out:       unanswered 0 window probes
   83  * @icsk_ext_hdr_len:      Network protocol overhead (IP/IPv6 options)
   84  * @icsk_ack:              Delayed ACK control data
   85  * @icsk_mtup:             MTU probing control data
  86  */
struct inet_connection_sock {
        /* inet_sock has to be the first member! */
        struct inet_sock          icsk_inet;
        struct request_sock_queue icsk_accept_queue;
        struct inet_bind_bucket   *icsk_bind_hash;
        unsigned long             icsk_timeout;
        struct timer_list         icsk_retransmit_timer;
        /* Delayed-ACK timer; armed via inet_csk_reset_xmit_timer(ICSK_TIME_DACK). */
        struct timer_list         icsk_delack_timer;
        __u32                     icsk_rto;
        __u32                     icsk_pmtu_cookie;
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        const struct tcp_ulp_ops  *icsk_ulp_ops;
        void __rcu                *icsk_ulp_data;
        void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
        struct hlist_node         icsk_listen_portaddr_node;
        /* Recompute MSS after a PMTU change. */
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
        __u8                      icsk_ca_state:6,
                                  icsk_ca_setsockopt:1,
                                  icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
        __u8                      icsk_backoff;
        __u8                      icsk_syn_retries;
        __u8                      icsk_probes_out;
        __u16                     icsk_ext_hdr_len;
        struct {
                __u8              pending;       /* ACK is pending                         */
                __u8              quick;         /* Scheduled number of quick acks         */
                __u8              pingpong;      /* The session is interactive             */
                __u8              blocked;       /* Delayed ACK was blocked by socket lock */
                __u32             ato;           /* Predicted tick of soft clock           */
                unsigned long     timeout;       /* Currently scheduled timeout            */
                __u32             lrcvtime;      /* timestamp of last received data packet */
                __u16             last_seg_size; /* Size of last incoming segment          */
                __u16             rcv_mss;       /* MSS used for delayed ACK decisions     */
        } icsk_ack;
        struct {
                int               enabled;

                /* Range of MTUs to search */
                int               search_high;
                int               search_low;

                /* Information on the current probe. */
                int               probe_size;

                u32               probe_timestamp;
        } icsk_mtup;
        /* User-configured timeout; NOTE(review): units (ms vs jiffies) are
         * not visible in this header — confirm against users.
         */
        u32                       icsk_user_timeout;

        /* Private scratch area for the attached congestion-control
         * algorithm, returned by inet_csk_ca().  Sized as
         * 104 / sizeof(u64) == 13 u64s; ICSK_CA_PRIV_SIZE below must be
         * kept in sync with this array.
         */
        u64                       icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE      (13 * sizeof(u64))
};
 141 
/* Timer identifiers passed as @what to inet_csk_reset_xmit_timer() and
 * inet_csk_clear_xmit_timer(); the active one is recorded in icsk_pending.
 */
#define ICSK_TIME_RETRANS       1       /* Retransmit timer */
#define ICSK_TIME_DACK          2       /* Delayed ack timer */
#define ICSK_TIME_PROBE0        3       /* Zero window probe timer */
#define ICSK_TIME_EARLY_RETRANS 4       /* Early retransmit timer */
#define ICSK_TIME_LOSS_PROBE    5       /* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT   6       /* Reordering timer */
 148 
/* Downcast a generic sock to its connection-oriented wrapper.  Valid
 * because struct inet_sock is the first member of
 * struct inet_connection_sock (see the comment on the struct).
 */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
        return (struct inet_connection_sock *)sk;
}
 153 
 154 static inline void *inet_csk_ca(const struct sock *sk)
 155 {
 156         return (void *)inet_csk(sk)->icsk_ca_priv;
 157 }
 158 
 159 struct sock *inet_csk_clone_lock(const struct sock *sk,
 160                                  const struct request_sock *req,
 161                                  const gfp_t priority);
 162 
/* Bit flags OR'd into icsk_ack.pending (see inet_csk_schedule_ack() and
 * the ICSK_TIME_DACK arm of inet_csk_reset_xmit_timer()).
 */
enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED  = 1,
        ICSK_ACK_TIMER  = 2,
        ICSK_ACK_PUSHED = 4,
        ICSK_ACK_PUSHED2 = 8,
        ICSK_ACK_NOW = 16       /* Send the next ACK immediately (once) */
};
 170 
 171 void inet_csk_init_xmit_timers(struct sock *sk,
 172                                void (*retransmit_handler)(struct timer_list *),
 173                                void (*delack_handler)(struct timer_list *),
 174                                void (*keepalive_handler)(struct timer_list *));
 175 void inet_csk_clear_xmit_timers(struct sock *sk);
 176 
 177 static inline void inet_csk_schedule_ack(struct sock *sk)
 178 {
 179         inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 180 }
 181 
 182 static inline int inet_csk_ack_scheduled(const struct sock *sk)
 183 {
 184         return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 185 }
 186 
 187 static inline void inet_csk_delack_init(struct sock *sk)
 188 {
 189         memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 190 }
 191 
 192 void inet_csk_delete_keepalive_timer(struct sock *sk);
 193 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 194 
/* Clear the bookkeeping for the @what timer event.  ICSK_TIME_RETRANS
 * and ICSK_TIME_PROBE0 share icsk_pending (and the retransmit timer);
 * ICSK_TIME_DACK uses icsk_ack.  The struct timer_list itself is only
 * stopped when INET_CSK_CLEAR_TIMERS is defined — it is #undef'd at the
 * top of this header ("Cancel timers, when they are not required"), so
 * by default a stale expiry is left to fire and be ignored.
 */
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
                icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
                sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
        } else if (what == ICSK_TIME_DACK) {
                icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
                sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
        } else {
                pr_debug("inet_csk BUG: unknown timer value\n");
        }
}
 213 
 214 /*
 215  *      Reset the retransmission timer
 216  */
 217 static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 218                                              unsigned long when,
 219                                              const unsigned long max_when)
 220 {
 221         struct inet_connection_sock *icsk = inet_csk(sk);
 222 
 223         if (when > max_when) {
 224                 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
 225                          sk, what, when, (void *)_THIS_IP_);
 226                 when = max_when;
 227         }
 228 
 229         if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
 230             what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
 231             what == ICSK_TIME_REO_TIMEOUT) {
 232                 icsk->icsk_pending = what;
 233                 icsk->icsk_timeout = jiffies + when;
 234                 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
 235         } else if (what == ICSK_TIME_DACK) {
 236                 icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
 237                 icsk->icsk_ack.timeout = jiffies + when;
 238                 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
 239         } else {
 240                 pr_debug("inet_csk BUG: unknown timer value\n");
 241         }
 242 }
 243 
 244 static inline unsigned long
 245 inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
 246                      unsigned long max_when)
 247 {
 248         u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
 249 
 250         return (unsigned long)min_t(u64, when, max_when);
 251 }
 252 
 253 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
 254 
 255 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 256 
 257 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
 258                                      const struct request_sock *req);
 259 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 260                                             struct sock *newsk,
 261                                             const struct request_sock *req);
 262 
 263 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
 264                                       struct request_sock *req,
 265                                       struct sock *child);
 266 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 267                                    unsigned long timeout);
 268 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 269                                          struct request_sock *req,
 270                                          bool own_req);
 271 
/* Account one more pending request in the listener's accept queue. */
static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
        reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}
 276 
/* Number of requests currently in the listener's accept queue. */
static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
        return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}
 281 
/* True when the accept queue has reached (or exceeded) the listener's
 * configured backlog.
 */
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
        return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
 286 
 287 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 288 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 289 
 290 void inet_csk_destroy_sock(struct sock *sk);
 291 void inet_csk_prepare_forced_close(struct sock *sk);
 292 
 293 /*
 294  * LISTEN is a special case for poll..
 295  */
 296 static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
 297 {
 298         return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
 299                         (EPOLLIN | EPOLLRDNORM) : 0;
 300 }
 301 
 302 int inet_csk_listen_start(struct sock *sk, int backlog);
 303 void inet_csk_listen_stop(struct sock *sk);
 304 
 305 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 306 
 307 int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 308                                char __user *optval, int __user *optlen);
 309 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 310                                char __user *optval, unsigned int optlen);
 311 
 312 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 313 
 314 #define TCP_PINGPONG_THRESH     3
 315 
/* Force pingpong (interactive) mode by saturating the counter to the
 * threshold that inet_csk_in_pingpong_mode() tests against.
 */
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
        inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
}
 320 
/* Leave pingpong mode by resetting the counter to zero. */
static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
        inet_csk(sk)->icsk_ack.pingpong = 0;
}
 325 
/* True once the pingpong counter has reached TCP_PINGPONG_THRESH,
 * i.e. the session is considered interactive.
 */
static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
        return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
 330 
 331 static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
 332 {
 333         struct inet_connection_sock *icsk = inet_csk(sk);
 334 
 335         if (icsk->icsk_ack.pingpong < U8_MAX)
 336                 icsk->icsk_ack.pingpong++;
 337 }
 338 #endif /* _INET_CONNECTION_SOCK_H */

/* [<][>][^][v][top][bottom][index][help] */