/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);
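/* Illustrative note: this limit is exposed through the IPv4 sysctl alias
 * net.ipv4.tcp_max_syn_backlog, so it can be raised at run time, e.g.
 * "sysctl -w net.ipv4.tcp_max_syn_backlog=1024".
 */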

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

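	/* Clamp the caller's backlog to [8, sysctl_max_syn_backlog] and
	 * round up to a power of two so the hash mask stays cheap.
	 * Worked example: a listen() backlog of 128 passes both clamps,
	 * and roundup_pow_of_two(128 + 1) yields a 256-entry SYN table.
	 */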
	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

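	/* Prefer a physically contiguous allocation while the table is
	 * small; for large tables (or under memory fragmentation) silently
	 * fall back to vzalloc(). kvfree() handles either flavour on free.
	 */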
	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, GFP_KERNEL |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = vzalloc(lopt_size);
	if (!lopt)
		return -ENOMEM;

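	/* A per-listener random hash seed keeps remote peers from
	 * constructing SYN floods that all land in one hash chain.
	 */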
	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	spin_lock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

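	/* Publish the fully initialised table under syn_wait_lock so that
	 * anyone walking queue->listen_opt only ever sees it complete.
	 */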
	spin_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	spin_unlock_bh(&queue->syn_wait_lock);

	return 0;
}

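/* A minimal caller sketch, loosely modelled on how a listener such as
 * inet_csk_listen_start() sizes its accept queue. The function name and
 * error handling are illustrative, not the exact kernel code path:
 *
 *	static int example_listen_start(struct sock *sk, int backlog)
 *	{
 *		struct inet_connection_sock *icsk = inet_csk(sk);
 *		int rc;
 *
 *		rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, backlog);
 *		if (rc)
 *			return rc;
 *		... move the socket to TCP_LISTEN, hash it, etc. ...
 *		return 0;
 *	}
 */
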
void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* This is an error recovery path only, no locking needed */
	kvfree(queue->listen_opt);
}

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
		struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	spin_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	spin_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* make all the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);

	if (listen_sock_qlen(lopt) != 0) {
		unsigned int i;

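		/* Drain every hash bucket: each pending request still holds
		 * a reference and may have an armed SYN-ACK timer, both of
		 * which must be released before freeing the table. Bumping
		 * qlen_dec marks the request as dequeued, since qlen is
		 * tracked as qlen_inc - qlen_dec.
		 */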
		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			spin_lock_bh(&queue->syn_wait_lock);
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				/* Because of the following del_timer_sync(),
				 * we must release the spinlock here
				 * or risk a deadlock.
				 */
				spin_unlock_bh(&queue->syn_wait_lock);
				atomic_inc(&lopt->qlen_dec);
				if (del_timer_sync(&req->rsk_timer))
					reqsk_put(req);
				reqsk_put(req);
				spin_lock_bh(&queue->syn_wait_lock);
			}
			spin_unlock_bh(&queue->syn_wait_lock);
		}
	}

	if (WARN_ON(listen_sock_qlen(lopt) != 0))
		pr_err("qlen %u\n", listen_sock_qlen(lopt));
	kvfree(lopt);
}

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after the 3WHS has been either completed or aborted
 * (e.g., an RST is received).
 *
 * Before TFO, a child socket was created only after the 3WHS was completed,
 * hence it never needed to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until the 3WHS is either completed or aborted. Afterwards the req will
 * stay until either the child socket is accepted, or, in the rare case,
 * until the listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH the 3WHS has
 * completed (or aborted) and the child socket has been accepted (or the
 * listener closed). When a child socket is accepted, its corresponding
 * req->sk is set to NULL since it's no longer needed. More importantly,
 * "req->sk == NULL" is what the code below uses to determine whether a
 * child socket has been accepted or not, and the check is protected by
 * the fastopenq->lock described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is created.
 * Only in the rare case when both the listener and the child locks are
 * held, e.g., in inet_csk_listen_stop(), do we not need to acquire the
 * lock. The lock also protects other fields such as fastopenq->qlen, which
 * is decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the listener's existing
 * socket lock. But first, the socket lock is difficult to use. It is not
 * a simple spin lock - one must consider sock_owned_by_user() and arrange
 * to use sk_add_backlog() and friends. What really makes it infeasible,
 * though, is the locking-hierarchy violation: inet_csk_listen_stop() may
 * try to acquire a child's lock while holding the listener's socket lock.
 * A corner case might also exist in tcp_v4_hnd_req() that would trigger
 * this locking order.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener, so it is protected by the
 * fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = req->rsk_listener;
	struct fastopen_queue *fastopenq;

	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;

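	/* Detach the request from the child; the child's socket lock
	 * serializes this store, while fastopenq->lock below guards the
	 * fields shared with the listener (qlen, tfo_listener, req->sk).
	 */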
	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->tfo_listener = false;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed, don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		reqsk_put(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered an
	 * RST. This is a simple defense against a TFO spoofing attack: the
	 * req keeps counting against fastopen.max_qlen, and TFO is disabled
	 * once the qlen exceeds max_qlen.
	 *
	 * For more details see the CoNEXT'11 "TCP Fast Open" paper.
	 */
	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
}
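
/* A hedged usage sketch: a hypothetical TCP helper dropping its TFO req
 * once the handshake outcome is known. Both the function and the way the
 * req is looked up are illustrative; the real callers live in the TCP
 * core.
 *
 *	static void example_tfo_done(struct sock *child, bool got_rst)
 *	{
 *		struct request_sock *req = tcp_sk(child)->fastopen_rsk;
 *
 *		if (req)
 *			reqsk_fastopen_remove(child, req, got_rst);
 *	}
 */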