net/sched/sch_mq.c


DEFINITIONS

This source file includes the following definitions:
  1. mq_offload
  2. mq_offload_stats
  3. mq_destroy
  4. mq_init
  5. mq_attach
  6. mq_dump
  7. mq_queue_get
  8. mq_select_queue
  9. mq_graft
  10. mq_leaf
  11. mq_find
  12. mq_dump_class
  13. mq_dump_class_stats
  14. mq_walk

// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c           Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

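/* Per-qdisc private data: the array of per-TX-queue child qdiscs built in
 * mq_init().  It is only needed between init and attach; mq_attach() frees
 * it once the children have been grafted onto their queues.
 */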
struct mq_sched {
        struct Qdisc            **qdiscs;
};

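/* Notify the driver of an mq lifecycle event (create/destroy) through
 * ndo_setup_tc(TC_SETUP_QDISC_MQ); -EOPNOTSUPP if the device cannot offload.
 */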
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mq_qopt_offload opt = {
                .command = cmd,
                .handle = sch->handle,
        };

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;

        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

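/* Ask the driver to fold its hardware counters into sch->bstats/qstats via
 * the generic offload dump helper.
 */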
static int mq_offload_stats(struct Qdisc *sch)
{
        struct tc_mq_qopt_offload opt = {
                .command = TC_MQ_STATS,
                .handle = sch->handle,
                .stats = {
                        .bstats = &sch->bstats,
                        .qstats = &sch->qstats,
                },
        };

        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

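/* Tear down the offload state and release any children still parked in
 * priv->qdiscs (non-NULL only if the qdisc is destroyed before attach).
 */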
static void mq_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        mq_offload(sch, TC_MQ_DESTROY);

        if (!priv->qdiscs)
                return;
        for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
                qdisc_put(priv->qdiscs[ntx]);
        kfree(priv->qdiscs);
}

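/* mq is only valid as the root qdisc of a multiqueue device.  Pre-allocate
 * one default child qdisc per TX queue (minor class id = queue index + 1)
 * so that the later attach step cannot fail.
 */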
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        unsigned int ntx;

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                dev_queue = netdev_get_tx_queue(dev, ntx);
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)),
                                          extack);
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        sch->flags |= TCQ_F_MQROOT;

        mq_offload(sch, TC_MQ_CREATE);
        return 0;
}

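/* Graft the pre-allocated children onto their TX queues and hash the ones
 * backing active queues; the temporary priv->qdiscs array is then dropped.
 */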
static void mq_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_put(old);
#ifdef CONFIG_NET_SCHED
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
#endif
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

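/* Aggregate the children's queue lengths and byte/packet/drop counters into
 * the root qdisc before dumping, then append any offloaded hardware stats.
 */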
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct Qdisc *qdisc;
        unsigned int ntx;
        __u32 qlen = 0;

        sch->q.qlen = 0;
        memset(&sch->bstats, 0, sizeof(sch->bstats));
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        /* MQ supports lockless qdiscs. However, statistics accounting needs
         * to account for all, none, or a mix of locked and unlocked child
         * qdiscs. Percpu stats are added to counters in-band and locking
         * qdisc totals are added at end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
                spin_lock_bh(qdisc_lock(qdisc));

                if (qdisc_is_percpu_stats(qdisc)) {
                        qlen = qdisc_qlen_sum(qdisc);
                        __gnet_stats_copy_basic(NULL, &sch->bstats,
                                                qdisc->cpu_bstats,
                                                &qdisc->bstats);
                        __gnet_stats_copy_queue(&sch->qstats,
                                                qdisc->cpu_qstats,
                                                &qdisc->qstats, qlen);
                        sch->q.qlen             += qlen;
                } else {
                        sch->q.qlen             += qdisc->q.qlen;
                        sch->bstats.bytes       += qdisc->bstats.bytes;
                        sch->bstats.packets     += qdisc->bstats.packets;
                        sch->qstats.qlen        += qdisc->qstats.qlen;
                        sch->qstats.backlog     += qdisc->qstats.backlog;
                        sch->qstats.drops       += qdisc->qstats.drops;
                        sch->qstats.requeues    += qdisc->qstats.requeues;
                        sch->qstats.overlimits  += qdisc->qstats.overlimits;
                }

                spin_unlock_bh(qdisc_lock(qdisc));
        }

        return mq_offload_stats(sch);
}

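/* Map a class id (1-based) to its TX queue, or NULL if out of range. */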
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

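/* Pick the TX queue addressed by the parent class id of a netlink request. */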
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
                                            struct tcmsg *tcm)
{
        return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

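/* Replace the qdisc attached to one TX queue, quiescing the device while the
 * switch happens, and let the driver know about the new child.
 */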
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                    struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
        struct tc_mq_qopt_offload graft_offload;
        struct net_device *dev = qdisc_dev(sch);

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);
        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        if (dev->flags & IFF_UP)
                dev_activate(dev);

        graft_offload.handle = sch->handle;
        graft_offload.graft_params.queue = cl - 1;
        graft_offload.graft_params.child_handle = new ? new->handle : 0;
        graft_offload.command = TC_MQ_GRAFT;

        qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
                                   TC_SETUP_QDISC_MQ, &graft_offload, extack);
        return 0;
}

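/* Return the (sleeping) child qdisc currently attached to the class. */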
static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        return dev_queue->qdisc_sleeping;
}

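/* Validate a class id; valid ids are simply TX queue index + 1. */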
static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!mq_queue_get(sch, ntx))
                return 0;
        return ntx;
}

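/* Fill in the tcmsg header for one class when dumping classes. */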
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
                         struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
        return 0;
}

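/* Copy the child's basic and queue statistics into the class stats dump. */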
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                               struct gnet_dump *d)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
                                  &sch->bstats) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
}

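/* Walk all classes (one per TX queue), honouring the walker's skip/stop. */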
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx;

        if (arg->stop)
                return;

        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops mq_class_ops = {
        .select_queue   = mq_select_queue,
        .graft          = mq_graft,
        .leaf           = mq_leaf,
        .find           = mq_find,
        .walk           = mq_walk,
        .dump           = mq_dump_class,
        .dump_stats     = mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
        .cl_ops         = &mq_class_ops,
        .id             = "mq",
        .priv_size      = sizeof(struct mq_sched),
        .init           = mq_init,
        .destroy        = mq_destroy,
        .attach         = mq_attach,
        .dump           = mq_dump,
        .owner          = THIS_MODULE,
};
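
/*
 * Illustrative sketch, not part of this file: roughly what a driver-side
 * handler for the TC_SETUP_QDISC_MQ requests issued by mq_offload() and
 * mq_offload_stats() above could look like.  The function name and the
 * choice to leave TC_MQ_GRAFT unsupported are hypothetical; only the
 * tc_mq_qopt_offload fields and tc_mq_command values used here appear in
 * this file.  A real driver would dispatch to this from its ndo_setup_tc()
 * callback when the setup type is TC_SETUP_QDISC_MQ.
 */
static int foo_setup_tc_mq(struct net_device *dev,
                           struct tc_mq_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_MQ_CREATE:
        case TC_MQ_DESTROY:
                /* nothing to program in this sketch; opt->handle identifies
                 * the root qdisc being created or destroyed
                 */
                return 0;
        case TC_MQ_STATS:
                /* a real driver would add its hardware counters to
                 * opt->stats.bstats and opt->stats.qstats here
                 */
                return 0;
        case TC_MQ_GRAFT:
                /* opt->graft_params.queue / .child_handle describe the graft */
                return -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
}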
