/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection;
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   For example, I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows and has minimal overhead when that number is small, but is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps is scaled by 2^5, avpps is scaled by 2^10.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links: the maximum representable speed is
     34360 Mbit/sec.
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
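
/*
 * Worked example (illustrative numbers, not from any real configuration):
 * with interval=0 the measurement period is A = 2^0 = 1 second, and with
 * ewma_log=3 the weight is W = 2^-3 = 0.125.  The time constant is then
 *
 *	T = A/(-ln(1-W)) = 1/(-ln(0.875)) ~= 7.5 seconds,
 *
 * i.e. roughly seven to eight periods must elapse before the reported
 * rate converges after a step change in the real rate.
 */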

#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est64	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u64			last_bytes;
	u64			avbps;		/* EWMA of bytes/sec, scaled by 2^5 */
	u32			last_packets;
	u32			avpps;		/* EWMA of packets/sec, scaled by 2^10 */
	struct rcu_head		e_rcu;		/* used by kfree_rcu() in gen_kill_estimator() */
	struct rb_node		node;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);

static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		struct gnet_stats_basic_packed b = {0};
		u64 brate;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		__gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);

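		/* The timer period for list idx is (HZ/4) << idx jiffies,
		 * i.e. 2^idx/4 seconds, so a byte delta converts to bytes/sec
		 * as delta << (2 - idx); the extra 2^5 scaling of avbps makes
		 * the shift (7 - idx).  avpps is scaled by 2^10, hence the
		 * (12 - idx) below.
		 */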
		brate = (b.bytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = b.bytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = (b.packets - e->last_packets)<<(12 - idx);
		e->last_packets = b.packets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}

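/*
 * A worked numeric sketch of the fixed-point readout above (values chosen
 * purely for illustration): with idx=2 the period is 1 second, so a steady
 * flow of 125000 bytes per period gives brate = 125000 << 5 = 4000000.
 * Once the EWMA has converged, avbps ~= 4000000 and the reported rate is
 * (4000000 + 0xF) >> 5 = 125000 bytes/sec, i.e. 1 Mbit/sec.
 */
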
/* est_tree_lock must be held while calling this function.  The tree is
 * keyed by the bstats pointer; duplicate keys go to the left subtree,
 * which is why gen_kill_estimator() loops until no match remains.
 */
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

/* est_tree_lock must be held.  Looks up an estimator by the
 * (bstats, rate_est) pair used as its key.
 */
static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est64 *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	struct gnet_stats_basic_packed b = {0};
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	__gnet_stats_copy_basic(&b, cpu_bstats, bstats);

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = b.bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = b.packets;
	est->avpps = rate_est->pps<<10;
	est->cpu_bstats = cpu_bstats;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
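
/* A minimal usage sketch (hypothetical caller, loosely modeled on qdisc
 * creation; the qdisc fields and the TCA_RATE attribute are assumptions
 * about the caller's context, not part of this file):
 *
 *	if (tca[TCA_RATE]) {
 *		err = gen_new_estimator(&sch->bstats, sch->cpu_bstats,
 *					&sch->rate_est,
 *					qdisc_root_sleeping_lock(sch),
 *					tca[TCA_RATE]);
 *		if (err)
 *			goto err_out;
 *	}
 */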

/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note: Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
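
/* Teardown sketch (hypothetical caller and object; the point is only that
 * est_timer() may still dereference stats_lock until an RCU grace period
 * has elapsed, so the memory holding the lock must outlive that period):
 *
 *	gen_kill_estimator(&sch->bstats, &sch->rate_est);
 *	...
 *	synchronize_rcu();	// or free the enclosing object via call_rcu()
 *	kfree(sch);
 */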

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est64 *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);
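
/* Illustrative check (hypothetical caller and fields, similar in spirit to
 * how callers decide whether an estimator must be (re)created after a
 * configuration change):
 *
 *	if (opt && !gen_estimator_active(&p->bstats, &p->rate_est))
 *		err = gen_new_estimator(&p->bstats, NULL, &p->rate_est,
 *					&p->lock, opt);
 */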