drivers/net/ethernet/chelsio/cxgb4/l2t.c


DEFINITIONS

This source file includes the following definitions:
  1. vlan_prio
  2. l2t_hold
  3. arp_hash
  4. ipv6_hash
  5. addr_hash
  6. addreq
  7. neigh_replace
  8. write_l2e
  9. send_pending
  10. do_l2t_write_rpl
  11. arpq_enqueue
  12. cxgb4_l2t_send
  13. alloc_l2e
  14. find_or_alloc_l2e
  15. _t4_l2e_free
  16. t4_l2e_free
  17. cxgb4_l2t_release
  18. reuse_entry
  19. cxgb4_l2t_get
  20. cxgb4_select_ntuple
  21. handle_failed_resolution
  22. t4_l2t_update
  23. t4_l2t_alloc_switching
  24. cxgb4_l2t_alloc_switching
  25. t4_init_l2t
  26. l2t_get_idx
  27. l2t_seq_start
  28. l2t_seq_next
  29. l2t_seq_stop
  30. l2e_state
  31. l2t_seq_show
  32. l2t_seq_open

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)
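
/*
 * Illustrative note (not driver code): the TID carried in an L2T write
 * packs the L2T index into its low 12 bits (L2T_SIZE is 4096 entries, per
 * l2t.h), the sync flag at bit 12, and the reply-queue id in still higher
 * bits via TID_QID_V() (it must sit above the index, since the index is
 * recovered with % L2T_SIZE).  A sync write of L2T index 7 would encode
 *
 *      tid = 7 | SYNC_WR_F | TID_QID_V(qid);
 *
 * and the reply handler recovers both pieces with
 *
 *      tid & SYNC_WR_F    -> nonzero: completes a synchronous write
 *      tid % L2T_SIZE     -> 7: the L2T index
 *
 * which is exactly how do_l2t_write_rpl() below decodes replies.
 */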

struct l2t_data {
        unsigned int l2t_start;     /* start index of our piece of the L2T */
        unsigned int l2t_size;      /* number of entries in l2tab */
        rwlock_t lock;
        atomic_t nfree;             /* number of free entries */
        struct l2t_entry *rover;    /* starting point for next allocation */
        struct l2t_entry l2tab[];   /* MUST BE LAST */
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
        return e->vlan >> VLAN_PRIO_SHIFT;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
        if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
                atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * entries in our L2T for this scheme to work.
 */
enum {
        L2T_MIN_HASH_BUCKETS = 2,
};

static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
                                    int ifindex)
{
        unsigned int l2t_size_half = d->l2t_size / 2;

        return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
                                     int ifindex)
{
        unsigned int l2t_size_half = d->l2t_size / 2;
        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

        return (l2t_size_half +
                (jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
                              int addr_len, int ifindex)
{
        return addr_len == 4 ? arp_hash(d, addr, ifindex) :
                               ipv6_hash(d, addr, ifindex);
}
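
/*
 * Worked example (illustrative): with l2t_size = 64, arp_hash() yields a
 * bucket in [0, 31] and ipv6_hash() one in [32, 63], so IPv4 and IPv6
 * neighbours can never share a hash chain:
 *
 *      arp_hash(d, &v4key, ifidx)  == jhash_2words(v4key, ifidx, 0) % 32
 *      ipv6_hash(d, v6key, ifidx)  == 32 + (jhash_2words(xor, ifidx, 0) % 32)
 */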

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
        if (e->v6)
                return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
                       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
        return e->addr[0] ^ addr[0];
}
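
/*
 * Usage sketch: addreq() ORs together the XOR of every address word, so any
 * difference makes the result nonzero, and callers test for a match by
 * negation, as the lookup loops below do:
 *
 *      if (!addreq(e, addr) && e->ifindex == ifidx)
 *              ... e refers to this neighbour ...
 */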

static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
        neigh_hold(n);
        if (e->neigh)
                neigh_release(e->neigh);
        e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
        struct l2t_data *d = adap->l2t;
        unsigned int l2t_idx = e->idx + d->l2t_start;
        struct sk_buff *skb;
        struct cpl_l2t_write_req *req;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);

        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
                                        l2t_idx | (sync ? SYNC_WR_F : 0) |
                                        TID_QID_V(adap->sge.fw_evtq.abs_id)));
        req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
        req->l2t_idx = htons(l2t_idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
                memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

        t4_mgmt_tx(adap, skb);

        if (sync && e->state != L2T_STATE_SWITCHING)
                e->state = L2T_STATE_SYNC_WRITE;
        return 0;
}
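
/*
 * Note on the two write flavors above: a synchronous write (sync != 0)
 * clears L2T_W_NOREPLY, so the firmware answers with a CPL_L2T_WRITE_RPL
 * on the fw_evtq and the entry parks in L2T_STATE_SYNC_WRITE until
 * do_l2t_write_rpl() drains its arpq; an asynchronous write (sync == 0)
 * sets NOREPLY and completes silently.
 */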

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&e->arpq)) != NULL)
                t4_ofld_send(adap, skb);
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
        struct l2t_data *d = adap->l2t;
        unsigned int tid = GET_TID(rpl);
        unsigned int l2t_idx = tid % L2T_SIZE;

        if (unlikely(rpl->status != CPL_ERR_NONE)) {
                dev_err(adap->pdev_dev,
                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
                        rpl->status, l2t_idx);
                return;
        }

        if (tid & SYNC_WR_F) {
                struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

                spin_lock(&e->lock);
                if (e->state != L2T_STATE_SWITCHING) {
                        send_pending(adap, e);
                        e->state = (e->neigh->nud_state & NUD_STALE) ?
                                        L2T_STATE_STALE : L2T_STATE_VALID;
                }
                spin_unlock(&e->lock);
        }
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
        __skb_queue_tail(&e->arpq, skb);
}

int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
                   struct l2t_entry *e)
{
        struct adapter *adap = netdev2adap(dev);

again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                neigh_event_send(e->neigh, NULL);
                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                spin_unlock_bh(&e->lock);
                /* fall through */
        case L2T_STATE_VALID:     /* fast-path, send the packet on */
                return t4_ofld_send(adap, skb);
        case L2T_STATE_RESOLVING:
        case L2T_STATE_SYNC_WRITE:
                spin_lock_bh(&e->lock);
                if (e->state != L2T_STATE_SYNC_WRITE &&
                    e->state != L2T_STATE_RESOLVING) {
                        spin_unlock_bh(&e->lock);
                        goto again;
                }
                arpq_enqueue(e, skb);
                spin_unlock_bh(&e->lock);

                if (e->state == L2T_STATE_RESOLVING &&
                    !neigh_event_send(e->neigh, NULL)) {
                        spin_lock_bh(&e->lock);
                        if (e->state == L2T_STATE_RESOLVING &&
                            !skb_queue_empty(&e->arpq))
                                write_l2e(adap, e, 1);
                        spin_unlock_bh(&e->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);
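
/*
 * Sketch of the entry state machine driven by cxgb4_l2t_send() and
 * do_l2t_write_rpl():
 *
 *      RESOLVING --(neighbour resolved, sync write_l2e)--> SYNC_WRITE
 *      SYNC_WRITE --(CPL_L2T_WRITE_RPL)--> VALID or STALE
 *      STALE --(revalidation kicked off)--> VALID
 *
 * Packets arriving while an entry is RESOLVING or SYNC_WRITE wait on the
 * entry's arpq and are released by send_pending().
 */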

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
        struct l2t_entry *end, *e, **p;

        if (!atomic_read(&d->nfree))
                return NULL;

        /* there's definitely a free entry */
        for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
                if (atomic_read(&e->refcnt) == 0)
                        goto found;

        for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
                ;
found:
        d->rover = e + 1;
        atomic_dec(&d->nfree);

        /*
         * The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state < L2T_STATE_SWITCHING)
                for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                e->next = NULL;
                                break;
                        }

        e->state = L2T_STATE_UNUSED;
        return e;
}
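
/*
 * Illustrative walk of alloc_l2e(): with 8 entries and d->rover at index 5,
 * the first loop scans indices 5..7; if the only free entry is index 2 the
 * second loop finds it from the start of the table.  A nonzero nfree is
 * what guarantees that second, unbounded-looking loop terminates.
 */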

static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
                                           u8 port, u8 *dmac)
{
        struct l2t_entry *end, *e, **p;
        struct l2t_entry *first_free = NULL;

        for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
                if (atomic_read(&e->refcnt) == 0) {
                        if (!first_free)
                                first_free = e;
                } else {
                        if (e->state == L2T_STATE_SWITCHING) {
                                if (ether_addr_equal(e->dmac, dmac) &&
                                    (e->vlan == vlan) && (e->lport == port))
                                        goto exists;
                        }
                }
        }

        if (first_free) {
                e = first_free;
                goto found;
        }

        return NULL;

found:
        /* The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state < L2T_STATE_SWITCHING)
                for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                e->next = NULL;
                                break;
                        }
        e->state = L2T_STATE_UNUSED;

exists:
        return e;
}

/* Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void _t4_l2e_free(struct l2t_entry *e)
{
        struct l2t_data *d;
        struct sk_buff *skb;

        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
                while ((skb = __skb_dequeue(&e->arpq)) != NULL)
                        kfree_skb(skb);
        }

        d = container_of(e, struct l2t_data, l2tab[e->idx]);
        atomic_inc(&d->nfree);
}

/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
        struct l2t_data *d;
        struct sk_buff *skb;

        spin_lock_bh(&e->lock);
        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
                while ((skb = __skb_dequeue(&e->arpq)) != NULL)
                        kfree_skb(skb);
        }
        spin_unlock_bh(&e->lock);

        d = container_of(e, struct l2t_data, l2tab[e->idx]);
        atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
        if (atomic_dec_and_test(&e->refcnt))
                t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
        unsigned int nud_state;

        spin_lock(&e->lock);                /* avoid race with t4_l2e_free */
        if (neigh != e->neigh)
                neigh_replace(e, neigh);
        nud_state = neigh->nud_state;
        if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
            !(nud_state & NUD_VALID))
                e->state = L2T_STATE_RESOLVING;
        else if (nud_state & NUD_CONNECTED)
                e->state = L2T_STATE_VALID;
        else
                e->state = L2T_STATE_STALE;
        spin_unlock(&e->lock);
}

struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
                                const struct net_device *physdev,
                                unsigned int priority)
{
        u8 lport;
        u16 vlan;
        struct l2t_entry *e;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *)neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
        int hash = addr_hash(d, addr, addr_len, ifidx);

        if (neigh->dev->flags & IFF_LOOPBACK)
                lport = netdev2pinfo(physdev)->tx_chan + 4;
        else
                lport = netdev2pinfo(physdev)->lport;

        if (is_vlan_dev(neigh->dev)) {
                vlan = vlan_dev_vlan_id(neigh->dev);
                vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
        } else {
                vlan = VLAN_NONE;
        }

        write_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx &&
                    e->vlan == vlan && e->lport == lport) {
                        l2t_hold(d, e);
                        if (atomic_read(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
                        goto done;
                }

        /* Need to allocate a new entry */
        e = alloc_l2e(d);
        if (e) {
                spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
                e->state = L2T_STATE_RESOLVING;
                if (neigh->dev->flags & IFF_LOOPBACK)
                        memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
                memcpy(e->addr, addr, addr_len);
                e->ifindex = ifidx;
                e->hash = hash;
                e->lport = lport;
                e->v6 = addr_len == 16;
                atomic_set(&e->refcnt, 1);
                neigh_replace(e, neigh);
                e->vlan = vlan;
                e->next = d->l2tab[hash].first;
                d->l2tab[hash].first = e;
                spin_unlock(&e->lock);
        }
done:
        write_unlock_bh(&d->lock);
        return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);
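
/*
 * Typical offload-path usage of the API above (hypothetical caller;
 * dst/neighbour handling elided for illustration):
 *
 *      e = cxgb4_l2t_get(adap->l2t, neigh, physdev, skb->priority);
 *      if (!e)
 *              return -ENOMEM;         (table exhausted)
 *      cxgb4_l2t_send(dev, skb, e);    (queued on the arpq if unresolved)
 *      ...
 *      cxgb4_l2t_release(e);           (when the connection goes away)
 */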

u64 cxgb4_select_ntuple(struct net_device *dev,
                        const struct l2t_entry *l2t)
{
        struct adapter *adap = netdev2adap(dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;

        /* Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
                ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

        if (tp->port_shift >= 0)
                ntuple |= (u64)l2t->lport << tp->port_shift;

        if (tp->protocol_shift >= 0)
                ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

        if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
                struct port_info *pi = (struct port_info *)netdev_priv(dev);

                ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
                                FT_VNID_ID_PF_V(adap->pf) |
                                FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
        }

        return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);
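
/*
 * Worked example (the shifts are adapter-dependent; these values are
 * hypothetical): with vlan_shift = 0, port_shift = 17 and
 * protocol_shift = 20, an entry with VLAN 5 on port 1 would produce
 *
 *      ntuple = (u64)(FT_VLAN_VLD_F | 5) << 0 |
 *               (u64)1 << 17 |
 *               (u64)IPPROTO_TCP << 20;
 *
 * Fields whose shift is negative are absent from the compressed tuple.
 */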

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.  Called with the entry's lock
 * held; the lock is dropped around each handler call or send.
 */
static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
                const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

                spin_unlock(&e->lock);
                if (cb->arp_err_handler)
                        cb->arp_err_handler(cb->handle, skb);
                else
                        t4_ofld_send(adap, skb);
                spin_lock(&e->lock);
        }
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
        struct l2t_entry *e;
        struct sk_buff_head *arpq = NULL;
        struct l2t_data *d = adap->l2t;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
        int hash = addr_hash(d, addr, addr_len, ifidx);

        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx) {
                        spin_lock(&e->lock);
                        if (atomic_read(&e->refcnt))
                                goto found;
                        spin_unlock(&e->lock);
                        break;
                }
        read_unlock_bh(&d->lock);
        return;

 found:
        /* BHs stay disabled here; the final spin_unlock_bh() re-enables them */
        read_unlock(&d->lock);

        if (neigh != e->neigh)
                neigh_replace(e, neigh);

        if (e->state == L2T_STATE_RESOLVING) {
                if (neigh->nud_state & NUD_FAILED) {
                        arpq = &e->arpq;
                } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
                           !skb_queue_empty(&e->arpq)) {
                        write_l2e(adap, e, 1);
                }
        } else {
                e->state = neigh->nud_state & NUD_CONNECTED ?
                        L2T_STATE_VALID : L2T_STATE_STALE;
                if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
                        write_l2e(adap, e, 0);
        }

        if (arpq)
                handle_failed_resolution(adap, e);
        spin_unlock_bh(&e->lock);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
                                         u8 port, u8 *eth_addr)
{
        struct l2t_data *d = adap->l2t;
        struct l2t_entry *e;
        int ret;

        write_lock_bh(&d->lock);
        e = find_or_alloc_l2e(d, vlan, port, eth_addr);
        if (e) {
                spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
                if (!atomic_read(&e->refcnt)) {
                        e->state = L2T_STATE_SWITCHING;
                        e->vlan = vlan;
                        e->lport = port;
                        ether_addr_copy(e->dmac, eth_addr);
                        atomic_set(&e->refcnt, 1);
                        ret = write_l2e(adap, e, 0);
                        if (ret < 0) {
                                _t4_l2e_free(e);
                                spin_unlock(&e->lock);
                                write_unlock_bh(&d->lock);
                                return NULL;
                        }
                } else {
                        atomic_inc(&e->refcnt);
                }

                spin_unlock(&e->lock);
        }
        write_unlock_bh(&d->lock);
        return e;
}

/**
 * cxgb4_l2t_alloc_switching - Allocate an L2T entry for a switching rule
 * @dev: net_device pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 * Returns pointer to the allocated l2t entry
 *
 * Allocates an L2T entry for use by the switching rule of a filter
 */
struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
                                            u8 port, u8 *dmac)
{
        struct adapter *adap = netdev2adap(dev);

        return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);
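
/*
 * Illustrative use by filter code (hypothetical values): a switch filter
 * rewriting traffic onto port 0 with VLAN 100 would do
 *
 *      e = cxgb4_l2t_alloc_switching(dev, 100, 0, dmac);
 *      if (!e)
 *              return -ENOMEM;
 *      ... program the filter with e->idx ...
 *      cxgb4_l2t_release(e);           (when the filter is deleted)
 */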

struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
        unsigned int l2t_size;
        int i;
        struct l2t_data *d;

        if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
                return NULL;
        l2t_size = l2t_end - l2t_start + 1;
        if (l2t_size < L2T_MIN_HASH_BUCKETS)
                return NULL;

        d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
        if (!d)
                return NULL;

        d->l2t_start = l2t_start;
        d->l2t_size = l2t_size;

        d->rover = d->l2tab;
        atomic_set(&d->nfree, l2t_size);
        rwlock_init(&d->lock);

        for (i = 0; i < d->l2t_size; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                spin_lock_init(&d->l2tab[i].lock);
                atomic_set(&d->l2tab[i].refcnt, 0);
                skb_queue_head_init(&d->l2tab[i].arpq);
        }
        return d;
}
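
/*
 * Example (illustrative): t4_init_l2t(0, 4095) sets up the full 4096-entry
 * table.  struct_size(d, l2tab, l2t_size) computes sizeof(struct l2t_data)
 * plus l2t_size * sizeof(struct l2t_entry) with overflow checking, which is
 * why l2tab must be the trailing flexible array member of l2t_data.
 */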

static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
        struct l2t_data *d = seq->private;

        return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
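
/*
 * Position 0 is reserved for SEQ_START_TOKEN (the header row), so table
 * entry i is shown at sequence position i + 1; hence the *pos - 1 above.
 */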

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        v = l2t_get_idx(seq, *pos);
        ++(*pos);
        return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
        switch (e->state) {
        case L2T_STATE_VALID: return 'V';
        case L2T_STATE_STALE: return 'S';
        case L2T_STATE_SYNC_WRITE: return 'W';
        case L2T_STATE_RESOLVING:
                return skb_queue_empty(&e->arpq) ? 'R' : 'A';
        case L2T_STATE_SWITCHING: return 'X';
        default:
                return 'U';
        }
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, " Idx IP address                "
                         "Ethernet address  VLAN/P LP State Users Port\n");
        else {
                char ip[60];
                struct l2t_data *d = seq->private;
                struct l2t_entry *e = v;

                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_SWITCHING)
                        ip[0] = '\0';
                else
                        sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
                seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
                           e->idx + d->l2t_start, ip, e->dmac,
                           e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
                           l2e_state(e), atomic_read(&e->refcnt),
                           e->neigh ? e->neigh->dev->name : "");
                spin_unlock_bh(&e->lock);
        }
        return 0;
}

static const struct seq_operations l2t_seq_ops = {
        .start = l2t_seq_start,
        .next = l2t_seq_next,
        .stop = l2t_seq_stop,
        .show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
        int rc = seq_open(file, &l2t_seq_ops);

        if (!rc) {
                struct adapter *adap = inode->i_private;
                struct seq_file *seq = file->private_data;

                seq->private = adap->l2t;
        }
        return rc;
}

const struct file_operations t4_l2t_fops = {
        .owner = THIS_MODULE,
        .open = l2t_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
