#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy;
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;

	struct gnet_stats_rate_est64	rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	struct Qdisc		*next_sched;
	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	unsigned int		__state;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	qdisc->__state |= __QDISC___STATE_RUNNING;
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
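
/* Usage sketch (illustrative only, not a new API): __state is protected
 * by the qdisc lock, so a caller that owns qdisc_lock() claims exclusive
 * dequeue rights with qdisc_run_begin() and must pair it with
 * qdisc_run_end() once done:
 *
 *	spin_lock(qdisc_lock(q));
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets; we are the only
 *		    thread running this qdisc ...
 *		qdisc_run_end(q);
 *	}
 *	spin_unlock(qdisc_lock(q));
 */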

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
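
/* Sketch: a dequeue path can combine the two helpers above to bulk
 * several packets up to the BQL budget (hypothetical illustration; the
 * real bulking logic lives in net/sched/sch_generic.c):
 *
 *	if (qdisc_may_bulk(q)) {
 *		int budget = qdisc_avail_bulklimit(txq);
 *
 *		while (budget > 0 && (skb = q->dequeue(q)) != NULL) {
 *			budget -= qdisc_pkt_len(skb);
 *			... chain skb for a single xmit call ...
 *		}
 *	}
 */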

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
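
/* Sketch: a non-work-conserving qdisc typically marks itself throttled
 * when the head packet may not be sent yet, arms a watchdog timer, and
 * the timer clears the bit again (illustrative; "next_tx_time" is a
 * hypothetical field name):
 *
 *	if (now < q->next_tx_time) {
 *		qdisc_throttled(sch);
 *		... arm a qdisc watchdog for q->next_tx_time ...
 *		return NULL;
 *	}
 */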

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	bool			(*destroy)(struct tcf_proto*, bool);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *, bool);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			_pad;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
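
/* Sketch: a qdisc keeping per-packet state in the cb[] area declares a
 * private struct living in qdisc_skb_cb::data and validates its size in
 * the accessor, mirroring e.g. netem ("my_skb_cb" is a hypothetical
 * name):
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */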

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
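
/* Sketch: a ->change() handler brackets configuration updates with
 * sch_tree_lock()/sch_tree_unlock() so the packet-processing paths
 * never observe a half-updated qdisc (illustrative):
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;
 *	while (sch->q.qlen > sch->limit)
 *		qdisc_queue_drop(sch);
 *	sch_tree_unlock(sch);
 */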

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
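
/* Sketch: the usual Qdisc_class_ops ->get() is a thin wrapper around
 * qdisc_class_find(), mirroring e.g. htb ("my_class" and
 * "my_sched_data" are hypothetical names):
 *
 *	static unsigned long my_get(struct Qdisc *sch, u32 classid)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (clc == NULL)
 *			return 0;
 *		return (unsigned long)container_of(clc, struct my_class, common);
 *	}
 */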

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
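
/* Sketch: a classful parent enqueuing into a child qdisc, mirroring
 * e.g. sch_tbf. Only qdisc_enqueue_root() masks off the __NET_XMIT_*
 * extension bits; internal callers keep them so net_xmit_drop_count()
 * can tell real drops from stolen packets:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */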

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	struct gnet_stats_basic_cpu *bstats =
				this_cpu_ptr(sch->cpu_bstats);

	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	sch->qstats.drops++;
}

static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
{
	struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);

	qstats->drops++;
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		qdisc_qstats_backlog_dec(sch, skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
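
/* Sketch: pairing rule for the two helpers above. A shaper peeks at the
 * child's head packet to decide whether it may go out yet, and only
 * then dequeues it for real; the shape mirrors e.g. sch_tbf
 * ("tokens_available" is a hypothetical predicate):
 *
 *	skb = child->ops->peek(child);
 *	if (skb && tokens_available(q, qdisc_pkt_len(skb)))
 *		return qdisc_dequeue_peeked(child);
 *	return NULL;
 */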

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
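
/* Sketch: the typical enqueue tail of a length-limited qdisc
 * ("my_enqueue" is hypothetical; the fifo qdiscs use
 * qdisc_reshape_fail() below rather than a plain qdisc_drop()):
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(skb_queue_len(&sch->q) < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch);
 *	}
 */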

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_qstats_drop(sch);

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
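
/* Worked example: with rate.cell_log = 3 and zero cell_align/overhead,
 * a 1000-byte packet maps to slot 1000 >> 3 = 125, so its transmit time
 * is rtab->data[125]. Slots past 255 (e.g. GSO-sized packets) are
 * approximated as (slot >> 8) copies of the top entry plus the entry
 * for the low 8 bits.
 */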

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
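
/* Worked example: psched_ratecfg_precompute() chooses mult and shift so
 * that mult / 2^shift is nanoseconds per byte. A 1500-byte packet with
 * r->overhead = 14 then costs (1514 * mult) >> shift ns. On
 * TC_LINKLAYER_ATM the length is first rounded up to whole 48-byte
 * cells carried in 53-byte frames: DIV_ROUND_UP(1514, 48) = 32 cells,
 * i.e. 1696 bytes on the wire.
 */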

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * a qdisc using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
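
/* Sketch: how a shaper typically uses the ratecfg helpers, mirroring
 * e.g. sch_tbf: precompute once at configuration time, then convert
 * lengths to nanoseconds on the hot path ("q->rate" and "toks" are
 * illustrative names):
 *
 *	psched_ratecfg_precompute(&q->rate, &qopt->rate, rate64);
 *	...
 *	toks -= (s64) psched_l2t_ns(&q->rate, qdisc_pkt_len(skb));
 */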

#endif
