root/drivers/net/ethernet/lantiq_xrx200.c


DEFINITIONS

This source file includes the following definitions.
  1. xrx200_pmac_r32
  2. xrx200_pmac_w32
  3. xrx200_pmac_mask
  4. xrx200_flush_dma
  5. xrx200_open
  6. xrx200_close
  7. xrx200_alloc_skb
  8. xrx200_hw_receive
  9. xrx200_poll_rx
  10. xrx200_tx_housekeeping
  11. xrx200_start_xmit
  12. xrx200_dma_irq
  13. xrx200_dma_init
  14. xrx200_hw_cleanup
  15. xrx200_probe
  16. xrx200_remove

// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN     0x600
#define XRX200_DMA_RX           0
#define XRX200_DMA_TX           1

/* cpu port mac */
#define PMAC_RX_IPG             0x0024
#define PMAC_RX_IPG_MASK        0xf

#define PMAC_HD_CTL             0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD         BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG         BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC          BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS          BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC          BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2         BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH        BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST         BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST         BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC        BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC          BIT(10)

struct xrx200_chan {
        int tx_free;

        struct napi_struct napi;
        struct ltq_dma_channel dma;
        struct sk_buff *skb[LTQ_DESC_NUM];

        struct xrx200_priv *priv;
};

struct xrx200_priv {
        struct clk *clk;

        struct xrx200_chan chan_tx;
        struct xrx200_chan chan_rx;

        struct net_device *net_dev;
        struct device *dev;

        void __iomem *pmac_reg;
};

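/* PMAC register accessors. The __raw_* MMIO helpers do no byte swapping and
 * add no barriers, which is presumably acceptable for this on-SoC block on
 * the Lantiq MIPS parts; ordering against the DMA engine is handled
 * explicitly with wmb() where it matters.
 */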
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
        return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
        __raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
                             u32 offset)
{
        u32 val = xrx200_pmac_r32(priv, offset);

        val &= ~(clear);
        val |= set;
        xrx200_pmac_w32(priv, val, offset);
}

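/* Descriptor ownership protocol used throughout this driver: the DMA engine
 * owns a descriptor while LTQ_DMA_OWN is set and raises LTQ_DMA_C once it
 * has finished with it, so (ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C
 * identifies a descriptor that is complete and back under software control.
 */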
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
        int i;

        for (i = 0; i < LTQ_DESC_NUM; i++) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
                        break;

                desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                            XRX200_DMA_DATA_LEN;
                ch->dma.desc++;
                ch->dma.desc %= LTQ_DESC_NUM;
        }
}

static int xrx200_open(struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);

        napi_enable(&priv->chan_tx.napi);
        ltq_dma_open(&priv->chan_tx.dma);
        ltq_dma_enable_irq(&priv->chan_tx.dma);

        napi_enable(&priv->chan_rx.napi);
        ltq_dma_open(&priv->chan_rx.dma);
        /* The boot loader does not always deactivate the receiving of frames
         * on the ports and then some packets queue up in the PPE buffers.
         * They already passed the PMAC so they do not have the tags
         * configured here. Read these packets here and drop them.
         * The HW should have written them into memory after 10us.
         */
        usleep_range(20, 40);
        xrx200_flush_dma(&priv->chan_rx);
        ltq_dma_enable_irq(&priv->chan_rx.dma);

        netif_wake_queue(net_dev);

        return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);

        netif_stop_queue(net_dev);

        napi_disable(&priv->chan_rx.napi);
        ltq_dma_close(&priv->chan_rx.dma);

        napi_disable(&priv->chan_tx.napi);
        ltq_dma_close(&priv->chan_tx.dma);

        return 0;
}

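/* Allocate and map a fresh receive buffer for the descriptor at ch->dma.desc.
 * The descriptor is handed back to the hardware (LTQ_DMA_OWN) even when
 * allocation or mapping fails, so the ring keeps running; the caller has to
 * check the return code.
 */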
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        dma_addr_t mapping;
        int ret = 0;

        ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
                                                          XRX200_DMA_DATA_LEN);
        if (!ch->skb[ch->dma.desc]) {
                ret = -ENOMEM;
                goto skip;
        }

        mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
                                 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                /* keep the previous buffer, otherwise the descriptor would
                 * be left pointing at a freed skb
                 */
                ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
                goto skip;
        }

        ch->dma.desc_base[ch->dma.desc].addr = mapping;
        /* Make sure the address is written before we give it to HW */
        wmb();

skip:
        ch->dma.desc_base[ch->dma.desc].ctl =
                LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                XRX200_DMA_DATA_LEN;

        return ret;
}

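/* Hand one completed receive buffer up the stack and immediately refill its
 * ring slot. On refill failure the descriptor index still advances and the
 * error is propagated to the NAPI poll loop.
 */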
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
        struct xrx200_priv *priv = ch->priv;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
        struct net_device *net_dev = priv->net_dev;
        int ret;

        ret = xrx200_alloc_skb(ch);

        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;

        if (ret) {
                netdev_err(net_dev, "failed to allocate new rx buffer\n");
                return ret;
        }

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, net_dev);
        netif_receive_skb(skb);
        net_dev->stats.rx_packets++;
        net_dev->stats.rx_bytes += len - ETH_FCS_LEN;

        return 0;
}

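/* NAPI RX poll: consume up to @budget completed descriptors, then re-enable
 * the channel interrupt once the ring has been drained below the budget.
 */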
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
        struct xrx200_chan *ch = container_of(napi,
                                struct xrx200_chan, napi);
        int rx = 0;
        int ret;

        while (rx < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                        ret = xrx200_hw_receive(ch);
                        if (ret)
                                return ret;
                        rx++;
                } else {
                        break;
                }
        }

        if (rx < budget) {
                napi_complete(&ch->napi);
                ltq_dma_enable_irq(&ch->dma);
        }

        return rx;
}

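/* TX completion also runs in NAPI context: reclaim descriptors the DMA
 * engine has finished with, free the associated skbs and report the freed
 * work to byte queue limits via netdev_completed_queue().
 */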
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
        struct xrx200_chan *ch = container_of(napi,
                                struct xrx200_chan, napi);
        struct net_device *net_dev = ch->priv->net_dev;
        int pkts = 0;
        int bytes = 0;

        while (pkts < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                        struct sk_buff *skb = ch->skb[ch->tx_free];

                        pkts++;
                        bytes += skb->len;
                        ch->skb[ch->tx_free] = NULL;
                        consume_skb(skb);
                        memset(&ch->dma.desc_base[ch->tx_free], 0,
                               sizeof(struct ltq_dma_desc));
                        ch->tx_free++;
                        ch->tx_free %= LTQ_DESC_NUM;
                } else {
                        break;
                }
        }

        net_dev->stats.tx_packets += pkts;
        net_dev->stats.tx_bytes += bytes;
        netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

        /* wake the queue again if xrx200_start_xmit() stopped it on a full
         * ring and descriptors have just been freed
         */
        if (netif_queue_stopped(net_dev))
                netif_wake_queue(net_dev);

        if (pkts < budget) {
                napi_complete(&ch->napi);
                ltq_dma_enable_irq(&ch->dma);
        }

        return pkts;
}

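/* Transmit one frame in a single descriptor: pad short frames to ETH_ZLEN,
 * map the buffer, round the bus address down to the 16 byte boundary the
 * DMA engine requires and encode the remainder in the descriptor's TX
 * offset field, then pass ownership to the hardware with SOP and EOP set.
 */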
static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);
        struct xrx200_chan *ch = &priv->chan_tx;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        u32 byte_offset;
        dma_addr_t mapping;
        int len;

        skb->dev = net_dev;
        if (skb_put_padto(skb, ETH_ZLEN)) {
                net_dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        len = skb->len;

        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
                netdev_err(net_dev, "tx ring full\n");
                netif_stop_queue(net_dev);
                return NETDEV_TX_BUSY;
        }

        ch->skb[ch->dma.desc] = skb;

        mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, mapping)))
                goto err_drop;

        /* dma needs to start on a 16 byte aligned address */
        byte_offset = mapping % 16;

        desc->addr = mapping - byte_offset;
        /* Make sure the address is written before we give it to HW */
        wmb();
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
                LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        if (ch->dma.desc == ch->tx_free)
                netif_stop_queue(net_dev);

        netdev_sent_queue(net_dev, len);

        return NETDEV_TX_OK;

err_drop:
        /* clear the ring slot again, otherwise the next transmit would see
         * a stale skb pointer and treat the ring as full forever
         */
        ch->skb[ch->dma.desc] = NULL;
        dev_kfree_skb(skb);
        net_dev->stats.tx_dropped++;
        net_dev->stats.tx_errors++;
        return NETDEV_TX_OK;
}

static const struct net_device_ops xrx200_netdev_ops = {
        .ndo_open               = xrx200_open,
        .ndo_stop               = xrx200_close,
        .ndo_start_xmit         = xrx200_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

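/* Per-channel interrupt handler: mask and ack the channel interrupt, then
 * let NAPI do the actual work; the poll functions re-enable the interrupt
 * when they run out of completed descriptors.
 */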
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
        struct xrx200_chan *ch = ptr;

        ltq_dma_disable_irq(&ch->dma);
        ltq_dma_ack_irq(&ch->dma);

        napi_schedule(&ch->napi);

        return IRQ_HANDLED;
}

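/* Set up both DMA channels on the ETOP port: the RX ring is fully pre-filled
 * with LTQ_DESC_NUM buffers before its interrupt is requested, and the
 * error paths unwind through the labels below in reverse order of setup.
 */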
static int xrx200_dma_init(struct xrx200_priv *priv)
{
        struct xrx200_chan *ch_rx = &priv->chan_rx;
        struct xrx200_chan *ch_tx = &priv->chan_tx;
        int ret = 0;
        int i;

        ltq_dma_init_port(DMA_PORT_ETOP);

        ch_rx->dma.nr = XRX200_DMA_RX;
        ch_rx->dma.dev = priv->dev;
        ch_rx->priv = priv;

        ltq_dma_alloc_rx(&ch_rx->dma);
        for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
             ch_rx->dma.desc++) {
                ret = xrx200_alloc_skb(ch_rx);
                if (ret)
                        goto rx_free;
        }
        ch_rx->dma.desc = 0;
        ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_rx", &priv->chan_rx);
        if (ret) {
                dev_err(priv->dev, "failed to request RX irq %d\n",
                        ch_rx->dma.irq);
                goto rx_ring_free;
        }

        ch_tx->dma.nr = XRX200_DMA_TX;
        ch_tx->dma.dev = priv->dev;
        ch_tx->priv = priv;

        ltq_dma_alloc_tx(&ch_tx->dma);
        ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_tx", &priv->chan_tx);
        if (ret) {
                dev_err(priv->dev, "failed to request TX irq %d\n",
                        ch_tx->dma.irq);
                goto tx_free;
        }

        return ret;

tx_free:
        ltq_dma_free(&ch_tx->dma);

rx_ring_free:
        /* free the allocated RX ring */
        for (i = 0; i < LTQ_DESC_NUM; i++) {
                if (priv->chan_rx.skb[i])
                        dev_kfree_skb_any(priv->chan_rx.skb[i]);
        }

rx_free:
        ltq_dma_free(&ch_rx->dma);
        return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
        int i;

        ltq_dma_free(&priv->chan_tx.dma);
        ltq_dma_free(&priv->chan_rx.dma);

        /* free the allocated RX ring */
        for (i = 0; i < LTQ_DESC_NUM; i++)
                dev_kfree_skb_any(priv->chan_rx.skb[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct resource *res;
        struct xrx200_priv *priv;
        struct net_device *net_dev;
        const u8 *mac;
        int err;

        /* alloc the network device */
        net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
        if (!net_dev)
                return -ENOMEM;

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;
        priv->dev = dev;

        net_dev->netdev_ops = &xrx200_netdev_ops;
        SET_NETDEV_DEV(net_dev, dev);
        net_dev->min_mtu = ETH_ZLEN;
        net_dev->max_mtu = XRX200_DMA_DATA_LEN;

        /* load the memory ranges */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "failed to get resources\n");
                return -ENOENT;
        }

        priv->pmac_reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->pmac_reg)) {
                dev_err(dev, "failed to request and remap io ranges\n");
                return PTR_ERR(priv->pmac_reg);
        }

        priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
        if (priv->chan_rx.dma.irq < 0)
                return priv->chan_rx.dma.irq;
        priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
        if (priv->chan_tx.dma.irq < 0)
                return priv->chan_tx.dma.irq;

        /* get the clock */
        priv->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "failed to get clock\n");
                return PTR_ERR(priv->clk);
        }

        mac = of_get_mac_address(np);
        if (!IS_ERR(mac))
                ether_addr_copy(net_dev->dev_addr, mac);
        else
                eth_hw_addr_random(net_dev);

        /* bring up the dma engine and IP core */
        err = xrx200_dma_init(priv);
        if (err)
                return err;

        /* enable clock gate */
        err = clk_prepare_enable(priv->clk);
        if (err)
                goto err_uninit_dma;

        /* set IPG to 12 */
        xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

        /* enable status header, enable CRC */
        xrx200_pmac_mask(priv, 0,
                         PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
                         PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
                         PMAC_HD_CTL);

        /* setup NAPI */
        netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
        netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

        platform_set_drvdata(pdev, priv);

        err = register_netdev(net_dev);
        if (err)
                goto err_unprepare_clk;

        return 0;

err_unprepare_clk:
        clk_disable_unprepare(priv->clk);

err_uninit_dma:
        xrx200_hw_cleanup(priv);

        return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
        struct xrx200_priv *priv = platform_get_drvdata(pdev);
        struct net_device *net_dev = priv->net_dev;

        /* free stack related instances */
        netif_stop_queue(net_dev);
        netif_napi_del(&priv->chan_tx.napi);
        netif_napi_del(&priv->chan_rx.napi);

        /* remove the actual device */
        unregister_netdev(net_dev);

        /* release the clock */
        clk_disable_unprepare(priv->clk);

        /* shut down hardware */
        xrx200_hw_cleanup(priv);

        return 0;
}

static const struct of_device_id xrx200_match[] = {
        { .compatible = "lantiq,xrx200-net" },
        {},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
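
/* For reference, a minimal sketch of a device tree node this driver would
 * bind against; the unit address, register size and interrupt numbers below
 * are hypothetical and taken from no particular board:
 *
 *      ethernet@e10b308 {
 *              compatible = "lantiq,xrx200-net";
 *              reg = <0xe10b308 0xcf8>;
 *              interrupt-parent = <&icu0>;
 *              interrupts = <73>, <72>;
 *              interrupt-names = "rx", "tx";
 *              mac-address = [ 00 11 22 33 44 55 ];
 *      };
 *
 * The probe function above requires the compatible string, one reg range and
 * the "rx"/"tx" named interrupts; mac-address is optional, and the clock
 * comes from devm_clk_get(dev, NULL), presumably resolved per device by the
 * SoC clock support rather than a DT clocks property.
 */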

static struct platform_driver xrx200_driver = {
        .probe = xrx200_probe,
        .remove = xrx200_remove,
        .driver = {
                .name = "lantiq,xrx200-net",
                .of_match_table = xrx200_match,
        },
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");
