This source file includes the following definitions:
- ifb_ri_tasklet
- ifb_stats64
- ifb_dev_init
- ifb_dev_free
- ifb_setup
- ifb_xmit
- ifb_close
- ifb_open
- ifb_validate
- ifb_init_one
- ifb_init_module
- ifb_cleanup_module
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	Authors:	Jamal Hadi Salim (2005)
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT    32
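/* Per-queue private data: each transmit queue of an ifb device gets its
 * own tasklet and a pair of skb lists.  Packets handed to ifb_xmit() are
 * queued on rq; the tasklet splices them onto tq and re-injects them.
 * The rx/tx counters are kept per queue and guarded by u64_stats_sync so
 * that 64-bit reads are consistent even on 32-bit hosts.
 */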
struct ifb_q_private {
        struct net_device       *dev;
        struct tasklet_struct   ifb_tasklet;
        int                     tasklet_pending;
        int                     txqnum;
        struct sk_buff_head     rq;
        u64                     rx_packets;
        u64                     rx_bytes;
        struct u64_stats_sync   rsync;

        struct u64_stats_sync   tsync;
        u64                     tx_packets;
        u64                     tx_bytes;
        struct sk_buff_head     tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
        struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

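/* Bottom half that drains one queue: packets queued by ifb_xmit() on rq
 * are spliced onto tq under the tx queue lock, then re-injected one by
 * one.  Packets redirected on egress go back out via dev_queue_xmit() on
 * their original device (skb->skb_iif); packets redirected from ingress
 * are pushed back into the stack with netif_receive_skb().
 */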
static void ifb_ri_tasklet(unsigned long _txp)
{
        struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
        struct netdev_queue *txq;
        struct sk_buff *skb;

        txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
        skb = skb_peek(&txp->tq);
        if (!skb) {
                if (!__netif_tx_trylock(txq))
                        goto resched;
                skb_queue_splice_tail_init(&txp->rq, &txp->tq);
                __netif_tx_unlock(txq);
        }

        while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
                skb->redirected = 0;
                skb->tc_skip_classify = 1;

                u64_stats_update_begin(&txp->tsync);
                txp->tx_packets++;
                txp->tx_bytes += skb->len;
                u64_stats_update_end(&txp->tsync);

                rcu_read_lock();
                skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
                        txp->dev->stats.tx_dropped++;
                        if (skb_queue_len(&txp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
                skb->skb_iif = txp->dev->ifindex;

                if (!skb->from_ingress) {
                        dev_queue_xmit(skb);
                } else {
                        skb_pull_rcsum(skb, skb->mac_len);
                        netif_receive_skb(skb);
                }
        }

        if (__netif_tx_trylock(txq)) {
                skb = skb_peek(&txp->rq);
                if (!skb) {
                        txp->tasklet_pending = 0;
                        if (netif_tx_queue_stopped(txq))
                                netif_tx_wake_queue(txq);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
                }
                __netif_tx_unlock(txq);
        } else {
resched:
                txp->tasklet_pending = 1;
                tasklet_schedule(&txp->ifb_tasklet);
        }
}

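/* Aggregate the per-queue counters into the device-wide stats.  The
 * u64_stats_fetch_begin/retry pair makes each 64-bit read consistent
 * even if a writer updates the counters concurrently.
 */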
static void ifb_stats64(struct net_device *dev,
                        struct rtnl_link_stats64 *stats)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
        unsigned int start;
        u64 packets, bytes;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                do {
                        start = u64_stats_fetch_begin_irq(&txp->rsync);
                        packets = txp->rx_packets;
                        bytes = txp->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
                stats->rx_packets += packets;
                stats->rx_bytes += bytes;

                do {
                        start = u64_stats_fetch_begin_irq(&txp->tsync);
                        packets = txp->tx_packets;
                        bytes = txp->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
                stats->tx_packets += packets;
                stats->tx_bytes += bytes;
        }
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
}

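/* Allocate one ifb_q_private per tx queue and initialize its skb lists,
 * stats syncs and tasklet.
 */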
static int ifb_dev_init(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp;
        int i;

        txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
        if (!txp)
                return -ENOMEM;
        dp->tx_private = txp;
        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                txp->txqnum = i;
                txp->dev = dev;
                __skb_queue_head_init(&txp->rq);
                __skb_queue_head_init(&txp->tq);
                u64_stats_init(&txp->rsync);
                u64_stats_init(&txp->tsync);
                tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
                             (unsigned long)txp);
                netif_tx_start_queue(netdev_get_tx_queue(dev, i));
        }
        return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open       = ifb_open,
        .ndo_stop       = ifb_close,
        .ndo_get_stats64 = ifb_stats64,
        .ndo_start_xmit = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_init       = ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST  | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6      | \
                      NETIF_F_GSO_ENCAP_ALL                             | \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX         | \
                      NETIF_F_HW_VLAN_STAG_TX)

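/* Tear down what ifb_dev_init() set up: kill the tasklets and free any
 * packets still sitting on the queues before releasing the per-queue array.
 */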
static void ifb_dev_free(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                tasklet_kill(&txp->ifb_tasklet);
                __skb_queue_purge(&txp->rq);
                __skb_queue_purge(&txp->tq);
        }
        kfree(dp->tx_private);
}

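/* Set up an ifb device: ethernet-like, no ARP and no multicast, with a
 * random MAC address and the pass-through offload features from
 * IFB_FEATURES.
 */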
static void ifb_setup(struct net_device *dev)
{
        /* Initialize the device structure. */
        dev->netdev_ops = &ifb_netdev_ops;

        /* Fill in device structure with ethernet-generic values. */
        ether_setup(dev);
        dev->tx_queue_len = TX_Q_LIMIT;

        dev->features |= IFB_FEATURES;
        dev->hw_features |= dev->features;
        dev->hw_enc_features |= dev->features;
        dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);

        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
        dev->needs_free_netdev = true;
        dev->priv_destructor = ifb_dev_free;

        dev->min_mtu = 0;
        dev->max_mtu = 0;
}

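/* Entry point for packets redirected to the ifb device.  Only packets that
 * were redirected here by tc (skb->redirected set, with a valid originating
 * ifindex) are accepted; everything else is dropped.  Accepted packets are
 * queued for the per-queue tasklet, and the queue is stopped once it holds
 * tx_queue_len packets.
 */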
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

        u64_stats_update_begin(&txp->rsync);
        txp->rx_packets++;
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);

        if (!skb->redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
        }

        if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
                netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

        __skb_queue_tail(&txp->rq, skb);
        if (!txp->tasklet_pending) {
                txp->tasklet_pending = 1;
                tasklet_schedule(&txp->ifb_tasklet);
        }

        return NETDEV_TX_OK;
}

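/* Open/close simply start and stop all tx queues; there is no hardware
 * to program.
 */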
static int ifb_close(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int ifb_open(struct net_device *dev)
{
        netif_tx_start_all_queues(dev);
        return 0;
}

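/* Netlink validation: if a MAC address is supplied it must be a
 * well-formed unicast ethernet address.
 */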
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}

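/* Registering rtnl_link_ops also lets additional ifb devices be created
 * and destroyed at runtime with "ip link add/del ... type ifb".
 */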
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind           = "ifb",
        .priv_size      = sizeof(struct ifb_dev_private),
        .setup          = ifb_setup,
        .validate       = ifb_validate,
};

/* Number of ifb devices to be set up by this module. */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
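
/* Typical usage (a sketch, assuming iproute2's tc with the matchall filter
 * and the mirred action; eth0 and ifb0 are example interface names):
 *
 *   modprobe ifb numifbs=1
 *   ip link set dev ifb0 up
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: matchall \
 *           action mirred egress redirect dev ifb0
 *
 * Traffic arriving on eth0 ingress is then seen as egress on ifb0 and can
 * be shaped with a normal egress qdisc attached to ifb0.
 */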

static int __init ifb_init_one(int index)
{
        struct net_device *dev_ifb;
        int err;

        dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
                               NET_NAME_UNKNOWN, ifb_setup);
        if (!dev_ifb)
                return -ENOMEM;

        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
                goto err;

        return 0;

err:
        free_netdev(dev_ifb);
        return err;
}

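/* Module init: pernet_ops_rwsem is taken before the rtnl lock to preserve
 * the locking order expected by __rtnl_link_register() and
 * __rtnl_link_unregister(), so the initial numifbs devices can be created
 * under a single rtnl lock acquisition.
 */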
static int __init ifb_init_module(void)
{
        int i, err;

        down_write(&pernet_ops_rwsem);
        rtnl_lock();
        err = __rtnl_link_register(&ifb_link_ops);
        if (err < 0)
                goto out;

        for (i = 0; i < numifbs && !err; i++) {
                err = ifb_init_one(i);
                cond_resched();
        }
        if (err)
                __rtnl_link_unregister(&ifb_link_ops);

out:
        rtnl_unlock();
        up_write(&pernet_ops_rwsem);

        return err;
}

static void __exit ifb_cleanup_module(void)
{
        rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");