net/sched/sch_codel.c


DEFINITIONS

This source file includes the following definitions.
  1. dequeue_func
  2. drop_func
  3. codel_qdisc_dequeue
  4. codel_qdisc_enqueue
  5. codel_change
  6. codel_init
  7. codel_dump
  8. codel_dump_stats
  9. codel_reset
  10. codel_module_init
  11. codel_module_exit

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on Linux by:
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
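
/* Example usage (assuming the iproute2 "tc" utility; "eth0" is a
 * placeholder device name):
 *
 *   tc qdisc add dev eth0 root codel limit 1000 target 5ms interval 100ms ecn
 */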

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

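/* Default queue limit, in packets; configurable via TCA_CODEL_LIMIT. */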
#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
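	/* packets dropped at enqueue because the queue was at sch->limit */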
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

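/* Drop callback for codel_dequeue(): free the packet and record the
 * drop in the qdisc stats.
 */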
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

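/* Dequeue entry point: codel_dequeue() runs the CoDel control law on the
 * per-packet enqueue timestamps, invoking drop_func while the standing
 * queue delay stays above target for longer than one interval.
 */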
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

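/* Enqueue fast path: while below the packet limit, timestamp the packet
 * and queue it at the tail; otherwise count an overlimit drop.
 */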
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

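/* All CoDel attributes are u32; TARGET, INTERVAL and CE_THRESHOLD are
 * expressed in microseconds.
 */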
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

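/* Apply a netlink configuration change. Times arrive in microseconds and
 * are converted to CoDel's internal 1024 ns units via CODEL_SHIFT.
 */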
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

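	/* If the limit was lowered, trim the queue now and report the
	 * removed backlog to ancestor qdiscs.
	 */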
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

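	/* Bypass is only safe if the queue can hold at least one packet. */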
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

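/* Dump the current configuration; times are reported back to userspace
 * in microseconds.
 */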
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(q->params.target)) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(q->params.interval)) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			q->params.ecn))
		goto nla_put_failure;
	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->params.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit	= q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

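	/* drop_next is exported as a signed offset from now, in usec. */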
	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

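/* Flush the queue and restart CoDel from its initial state. */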
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");
