root/net/8021q/vlan_core.c

DEFINITIONS

This source file includes the following definitions:
  1. vlan_do_receive
  2. __vlan_find_dev_deep_rcu
  3. vlan_dev_real_dev
  4. vlan_dev_vlan_id
  5. vlan_dev_vlan_proto
  6. vlan_group_free
  7. vlan_info_free
  8. vlan_info_rcu_free
  9. vlan_info_alloc
  10. vlan_hw_filter_capable
  11. vlan_vid_info_get
  12. vlan_vid_info_alloc
  13. vlan_add_rx_filter_info
  14. vlan_kill_rx_filter_info
  15. vlan_for_each
  16. vlan_filter_push_vids
  17. vlan_filter_drop_vids
  18. __vlan_vid_add
  19. vlan_vid_add
  20. __vlan_vid_del
  21. vlan_vid_del
  22. vlan_vids_add_by_dev
  23. vlan_vids_del_by_dev
  24. vlan_uses_dev
  25. vlan_gro_receive
  26. vlan_gro_complete
  27. vlan_offload_init

// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

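/*
 * vlan_do_receive - steer a hardware-tagged skb to its VLAN device.
 *
 * Called from the core RX path once an skb carries a hardware-accelerated
 * VLAN tag.  Returns true if the skb was retargeted to a matching VLAN
 * device; returns false either because no VLAN device exists for this tag
 * (the caller then delivers the packet with the tag still set) or because
 * the skb was consumed (in which case *skbp is set to NULL).
 */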
bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
        u16 vlan_id = skb_vlan_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
        if (!vlan_dev)
                return false;

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        if (unlikely(!(vlan_dev->flags & IFF_UP))) {
                kfree_skb(skb);
                *skbp = NULL;
                return false;
        }

        skb->dev = vlan_dev;
        if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
                /* Our lower layer thinks this is not local; let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
        }

        if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
            !netif_is_macvlan_port(vlan_dev) &&
            !netif_is_bridge_port(vlan_dev)) {
                unsigned int offset = skb->data - skb_mac_header(skb);

                /*
                 * vlan_insert_inner_tag() expects skb->data to point to the
                 * mac header, so move skb->data there before calling it and
                 * restore the original position afterwards.
                 */
                skb_push(skb, offset);
                skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
                                                    skb->vlan_tci, skb->mac_len);
                if (!skb)
                        return false;
                skb_pull(skb, offset + VLAN_HLEN);
                skb_reset_mac_len(skb);
        }

        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        __vlan_hwaccel_clear_tag(skb);

        rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
        if (skb->pkt_type == PACKET_MULTICAST)
                rx_stats->rx_multicast++;
        u64_stats_update_end(&rx_stats->syncp);

        return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
                                        __be16 vlan_proto, u16 vlan_id)
{
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

        if (vlan_info) {
                return vlan_group_get_device(&vlan_info->grp,
                                             vlan_proto, vlan_id);
        } else {
                /*
                 * Lower devices of master uppers (bonding, team) do not have
                 * a grp assigned to themselves; the grp is assigned to the
                 * upper device instead.
                 */
                struct net_device *upper_dev;

                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
                        return __vlan_find_dev_deep_rcu(upper_dev,
                                                    vlan_proto, vlan_id);
        }

        return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        struct net_device *ret = vlan_dev_priv(dev)->real_dev;

        while (is_vlan_dev(ret))
                ret = vlan_dev_priv(ret)->real_dev;

        return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
        int i, j;

        for (i = 0; i < VLAN_PROTO_NUM; i++)
                for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
                        kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
        vlan_group_free(&vlan_info->grp);
        kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
        vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
        struct vlan_info *vlan_info;

        vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
        if (!vlan_info)
                return NULL;

        vlan_info->real_dev = dev;
        INIT_LIST_HEAD(&vlan_info->vid_list);
        return vlan_info;
}

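/*
 * One entry per (protocol, VID) pair known on a real device.  The list
 * hangs off struct vlan_info and is protected by RTNL; refcount counts
 * how many users (VLAN devices, or drivers calling vlan_vid_add()) need
 * this VID programmed into the device's RX filter.
 */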
struct vlan_vid_info {
        struct list_head list;
        __be16 proto;
        u16 vid;
        int refcount;
};

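/*
 * A device can filter VLANs in hardware only if it advertises the feature
 * flag matching the tag protocol: NETIF_F_HW_VLAN_CTAG_FILTER for 802.1Q
 * (C-tags) and NETIF_F_HW_VLAN_STAG_FILTER for 802.1ad (S-tags).
 */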
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
        if (proto == htons(ETH_P_8021Q) &&
            dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                return true;
        if (proto == htons(ETH_P_8021AD) &&
            dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
                return true;
        return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
                                               __be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                if (vid_info->proto == proto && vid_info->vid == vid)
                        return vid_info;
        }
        return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
        if (!vid_info)
                return NULL;
        vid_info->proto = proto;
        vid_info->vid = vid;

        return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
        if (!vlan_hw_filter_capable(dev, proto))
                return 0;

        if (netif_device_present(dev))
                return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
        else
                return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
        if (!vlan_hw_filter_capable(dev, proto))
                return 0;

        if (netif_device_present(dev))
                return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
        else
                return -ENODEV;
}

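/*
 * vlan_for_each - walk every VID registered on @dev under RTNL, invoking
 * @action with the corresponding VLAN net_device (NULL if only a filter
 * entry exists, with no VLAN device) and the VID.  A non-zero return from
 * @action aborts the walk and is propagated to the caller.
 *
 * A minimal usage sketch (dump_vid is a hypothetical callback name):
 *
 *	static int dump_vid(struct net_device *vdev, int vid, void *arg)
 *	{
 *		pr_info("vid %d -> %s\n", vid, vdev ? vdev->name : "none");
 *		return 0;
 *	}
 *	...
 *	rtnl_lock();
 *	err = vlan_for_each(real_dev, dump_vid, NULL);
 *	rtnl_unlock();
 */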
int vlan_for_each(struct net_device *dev,
                  int (*action)(struct net_device *dev, int vid, void *arg),
                  void *arg)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        struct net_device *vdev;
        int ret;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
                                             vid_info->vid);
                ret = action(vdev, vid_info->vid, arg);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(vlan_for_each);

int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
        struct net_device *real_dev = vlan_info->real_dev;
        struct vlan_vid_info *vlan_vid_info;
        int err;

        list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
                if (vlan_vid_info->proto == proto) {
                        err = vlan_add_rx_filter_info(real_dev, proto,
                                                      vlan_vid_info->vid);
                        if (err)
                                goto unwind;
                }
        }

        return 0;

unwind:
        list_for_each_entry_continue_reverse(vlan_vid_info,
                                             &vlan_info->vid_list, list) {
                if (vlan_vid_info->proto == proto)
                        vlan_kill_rx_filter_info(real_dev, proto,
                                                 vlan_vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
        struct vlan_vid_info *vlan_vid_info;

        list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
                if (vlan_vid_info->proto == proto)
                        vlan_kill_rx_filter_info(vlan_info->real_dev,
                                                 vlan_vid_info->proto,
                                                 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

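/*
 * __vlan_vid_add - allocate a vid_info entry and program the VID into the
 * device's hardware RX filter (if the device supports filtering for this
 * protocol).  On success the entry is linked into the vid list with a
 * refcount of zero; the caller takes the first reference.
 */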
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
                          struct vlan_vid_info **pvid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        struct vlan_vid_info *vid_info;
        int err;

        vid_info = vlan_vid_info_alloc(proto, vid);
        if (!vid_info)
                return -ENOMEM;

        err = vlan_add_rx_filter_info(dev, proto, vid);
        if (err) {
                kfree(vid_info);
                return err;
        }

        list_add(&vid_info->list, &vlan_info->vid_list);
        vlan_info->nr_vids++;
        *pvid_info = vid_info;
        return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;
        bool vlan_info_created = false;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info) {
                vlan_info = vlan_info_alloc(dev);
                if (!vlan_info)
                        return -ENOMEM;
                vlan_info_created = true;
        }
        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info) {
                err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
                if (err)
                        goto out_free_vlan_info;
        }
        vid_info->refcount++;

        if (vlan_info_created)
                rcu_assign_pointer(dev->vlan_info, vlan_info);

        return 0;

out_free_vlan_info:
        if (vlan_info_created)
                kfree(vlan_info);
        return err;
}
EXPORT_SYMBOL(vlan_vid_add);
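
/*
 * Example: when a CTAG-filter-capable device comes up, the 8021q core uses
 * this to keep VID 0 in the device's RX filter so that priority-tagged
 * frames are still received:
 *
 *	vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 */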

static void __vlan_vid_del(struct vlan_info *vlan_info,
                           struct vlan_vid_info *vid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        __be16 proto = vid_info->proto;
        u16 vid = vid_info->vid;
        int err;

        err = vlan_kill_rx_filter_info(dev, proto, vid);
        if (err)
                pr_warn("failed to kill vid %04x/%d for device %s\n",
                        proto, vid, dev->name);

        list_del(&vid_info->list);
        kfree(vid_info);
        vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return;

        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info)
                return;
        vid_info->refcount--;
        if (vid_info->refcount == 0) {
                __vlan_vid_del(vlan_info, vid_info);
                if (vlan_info->nr_vids == 0) {
                        RCU_INIT_POINTER(dev->vlan_info, NULL);
                        call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
                }
        }
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
                         const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        list_for_each_entry_continue_reverse(vid_info,
                                             &vlan_info->vid_list,
                                             list) {
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
                          const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list)
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
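
/*
 * Example pairing: bonding mirrors the bond's VID set onto a newly
 * enslaved device and removes it again on release, roughly:
 *
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);	// enslave
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);		// release
 */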
 443 
bool vlan_uses_dev(const struct net_device *dev)
{
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return false;
        return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

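/*
 * GRO support for VLAN-tagged packets whose tag is still in the payload
 * (i.e. not hardware-accelerated): peek at the VLAN header, look up the
 * offload handler for the encapsulated protocol, mark held flows whose
 * VLAN headers differ as not the same flow, then hand the skb off to the
 * inner protocol's gro_receive callback.
 */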
static struct sk_buff *vlan_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        const struct packet_offload *ptype;
        unsigned int hlen, off_vlan;
        struct sk_buff *pp = NULL;
        struct vlan_hdr *vhdr;
        struct sk_buff *p;
        __be16 type;
        int flush = 1;

        off_vlan = skb_gro_offset(skb);
        hlen = off_vlan + sizeof(*vhdr);
        vhdr = skb_gro_header_fast(skb, off_vlan);
        if (skb_gro_header_hard(skb, hlen)) {
                vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
                if (unlikely(!vhdr))
                        goto out;
        }

        type = vhdr->h_vlan_encapsulated_proto;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype)
                goto out_unlock;

        flush = 0;

        list_for_each_entry(p, head, list) {
                struct vlan_hdr *vhdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
                if (compare_vlan_header(vhdr, vhdr2))
                        NAPI_GRO_CB(p)->same_flow = 0;
        }

        skb_gro_pull(skb, sizeof(*vhdr));
        skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}

static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
        __be16 type = vhdr->h_vlan_encapsulated_proto;
        struct packet_offload *ptype;
        int err = -ENOENT;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

        rcu_read_unlock();
        return err;
}

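/*
 * Register GRO callbacks for both the 802.1Q and 802.1ad ethertypes.
 * dev_add_offload() keeps the offload list sorted by ascending priority,
 * so priority 10 places these entries after the default (priority 0)
 * protocol offloads.
 */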
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
        {
                .type = cpu_to_be16(ETH_P_8021Q),
                .priority = 10,
                .callbacks = {
                        .gro_receive = vlan_gro_receive,
                        .gro_complete = vlan_gro_complete,
                },
        },
        {
                .type = cpu_to_be16(ETH_P_8021AD),
                .priority = 10,
                .callbacks = {
                        .gro_receive = vlan_gro_receive,
                        .gro_complete = vlan_gro_complete,
                },
        },
};

static int __init vlan_offload_init(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
                dev_add_offload(&vlan_packet_offloads[i]);

        return 0;
}

fs_initcall(vlan_offload_init);
