Lines matching refs: bp (the struct macb *bp device-private pointer, Cadence MACB/GEM Ethernet driver)
97 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) in macb_rx_desc() argument
99 return &bp->rx_ring[macb_rx_ring_wrap(index)]; in macb_rx_desc()
102 static void *macb_rx_buffer(struct macb *bp, unsigned int index) in macb_rx_buffer() argument
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); in macb_rx_buffer()
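These two helpers map a free-running index onto the fixed-size RX descriptor ring. In the driver, macb_rx_ring_wrap() reduces the index with a bitmask, which only works because the ring size is a power of two. A minimal standalone sketch of the wrap arithmetic (the RX_RING_SIZE value here is illustrative):

```c
#include <assert.h>

#define RX_RING_SIZE 512  /* must be a power of two for the mask trick */

/* Same idiom as the driver's macb_rx_ring_wrap(): a free-running
 * index is reduced modulo the ring size with a cheap bitmask. */
static unsigned int rx_ring_wrap(unsigned int index)
{
        return index & (RX_RING_SIZE - 1);
}

int main(void)
{
        assert(rx_ring_wrap(0) == 0);
        assert(rx_ring_wrap(RX_RING_SIZE) == 0);      /* wraps back to slot 0 */
        assert(rx_ring_wrap(RX_RING_SIZE + 7) == 7);  /* ...and keeps counting */
        return 0;
}
```

The same trick recurs below in gem_rx_refill() and the TX paths, where head and tail counters run freely and are only wrapped at the point of array access.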
108 static u32 hw_readl_native(struct macb *bp, int offset) in hw_readl_native() argument
110 return __raw_readl(bp->regs + offset); in hw_readl_native()
113 static void hw_writel_native(struct macb *bp, int offset, u32 value) in hw_writel_native() argument
115 __raw_writel(value, bp->regs + offset); in hw_writel_native()
118 static u32 hw_readl(struct macb *bp, int offset) in hw_readl() argument
120 return readl_relaxed(bp->regs + offset); in hw_readl()
123 static void hw_writel(struct macb *bp, int offset, u32 value) in hw_writel() argument
125 writel_relaxed(value, bp->regs + offset); in hw_writel()
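The driver keeps two pairs of register accessors, __raw_readl()/__raw_writel() for "native-endian" register windows and readl_relaxed()/writel_relaxed() for little-endian ones, and binds one pair to bp->macb_reg_readl/bp->macb_reg_writel at probe time (see the macb_probe() hits near the end of this listing). A minimal userspace sketch of that function-pointer dispatch; struct macb_like and the stand-in I/O helpers are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

struct macb_like {
        uint8_t *regs;   /* base of the memory-mapped register window */
        uint32_t (*reg_readl)(struct macb_like *bp, int offset);
        void (*reg_writel)(struct macb_like *bp, int offset, uint32_t value);
};

/* Stand-ins for __raw_readl()/readl_relaxed(): in the kernel the two
 * pairs differ in byte swapping and barriers, not in call shape. */
static uint32_t rd_native(struct macb_like *bp, int offset)
{
        return *(volatile uint32_t *)(bp->regs + offset);
}

static void wr_native(struct macb_like *bp, int offset, uint32_t value)
{
        *(volatile uint32_t *)(bp->regs + offset) = value;
}

int main(void)
{
        static uint32_t window[64];
        struct macb_like bp = { (uint8_t *)window, rd_native, wr_native };

        /* Every later register access goes through the pointers, so the
         * endianness decision is made exactly once, at probe time. */
        bp.reg_writel(&bp, 0x08, 0xdeadbeef);
        printf("0x%08x\n", (unsigned)bp.reg_readl(&bp, 0x08));
        return 0;
}
```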
158 static void macb_set_hwaddr(struct macb *bp) in macb_set_hwaddr() argument
163 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
164 macb_or_gem_writel(bp, SA1B, bottom); in macb_set_hwaddr()
165 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
166 macb_or_gem_writel(bp, SA1T, top); in macb_set_hwaddr()
169 macb_or_gem_writel(bp, SA2B, 0); in macb_set_hwaddr()
170 macb_or_gem_writel(bp, SA2T, 0); in macb_set_hwaddr()
171 macb_or_gem_writel(bp, SA3B, 0); in macb_set_hwaddr()
172 macb_or_gem_writel(bp, SA3T, 0); in macb_set_hwaddr()
173 macb_or_gem_writel(bp, SA4B, 0); in macb_set_hwaddr()
174 macb_or_gem_writel(bp, SA4T, 0); in macb_set_hwaddr()
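macb_set_hwaddr() programs the six-byte MAC address into a register pair: bytes 0..3 go into SA1B (bottom) and bytes 4..5 into SA1T (top), and the three remaining specific-address slots are cleared so stale entries cannot match. A sketch of the byte packing, assuming a little-endian host so a plain memcpy() matches the driver's cpu_to_le32()/cpu_to_le16() result:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
        uint32_t bottom;
        uint16_t top;

        /* Same layout as SA1B/SA1T: bytes 0..3 in the bottom word,
         * bytes 4..5 in the top half-word. */
        memcpy(&bottom, mac, 4);
        memcpy(&top, mac + 4, 2);

        printf("SA1B = 0x%08x, SA1T = 0x%04x\n", (unsigned)bottom, (unsigned)top);
        return 0;
}
```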
177 static void macb_get_hwaddr(struct macb *bp) in macb_get_hwaddr() argument
185 pdata = dev_get_platdata(&bp->pdev->dev); in macb_get_hwaddr()
189 bottom = macb_or_gem_readl(bp, SA1B + i * 8); in macb_get_hwaddr()
190 top = macb_or_gem_readl(bp, SA1T + i * 8); in macb_get_hwaddr()
209 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
215 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
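macb_get_hwaddr() walks the same path in reverse: it reassembles six bytes from each bottom/top register pair, keeps the first address that passes is_valid_ether_addr(), and otherwise logs the dev_info() message above and falls back to eth_hw_addr_random(). A sketch of the validity test, mirroring what is_valid_ether_addr() checks (reject multicast and all-zero addresses):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the kernel's is_valid_ether_addr(): reject multicast
 * (bit 0 of the first byte) and the all-zeros address. */
static bool valid_ether_addr(const uint8_t *addr)
{
        static const uint8_t zero[6];

        if (addr[0] & 0x01)                 /* multicast/broadcast bit */
                return false;
        if (memcmp(addr, zero, 6) == 0)     /* all zeros: unprogrammed */
                return false;
        return true;
}

int main(void)
{
        const uint8_t good[6]  = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        assert(valid_ether_addr(good));
        assert(!valid_ether_addr(mcast));
        return 0;
}
```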
220 struct macb *bp = bus->priv; in macb_mdio_read() local
223 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) in macb_mdio_read()
230 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) in macb_mdio_read()
233 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); in macb_mdio_read()
241 struct macb *bp = bus->priv; in macb_mdio_write() local
243 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) in macb_mdio_write()
251 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) in macb_mdio_write()
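Both MDIO paths compose a clause-22 management frame in the MAN register and then spin on the NSR IDLE bit until the shift register has clocked the frame out to the PHY. A sketch of the field packing; the shift positions below follow the usual MACB MAN layout but should be treated as illustrative, since the driver builds the frame through the MACB_BF() macros:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative clause-22 MDIO frame layout; the driver composes the
 * same fields with MACB_BF(SOF, ...) | MACB_BF(RW, ...) | ... */
static uint32_t mdio_frame(unsigned rw, unsigned phy, unsigned reg, uint16_t data)
{
        return (1u  << 30) |   /* SOF: start of frame = 01 */
               (rw  << 28) |   /* RW: 10 = read, 01 = write */
               (phy << 23) |   /* PHYA: PHY address, 5 bits */
               (reg << 18) |   /* REGA: register address, 5 bits */
               (2u  << 16) |   /* CODE: must be 10 */
               data;           /* DATA: write value, or 0 for reads */
}

int main(void)
{
        printf("read frame:  0x%08x\n", (unsigned)mdio_frame(2, 1, 2, 0));
        printf("write frame: 0x%08x\n", (unsigned)mdio_frame(1, 1, 0, 0x1234));
        return 0;
}
```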
303 struct macb *bp = netdev_priv(dev); in macb_handle_link_change() local
304 struct phy_device *phydev = bp->phy_dev; in macb_handle_link_change()
308 spin_lock_irqsave(&bp->lock, flags); in macb_handle_link_change()
311 if ((bp->speed != phydev->speed) || in macb_handle_link_change()
312 (bp->duplex != phydev->duplex)) { in macb_handle_link_change()
315 reg = macb_readl(bp, NCFGR); in macb_handle_link_change()
317 if (macb_is_gem(bp)) in macb_handle_link_change()
325 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) in macb_handle_link_change()
328 macb_or_gem_writel(bp, NCFGR, reg); in macb_handle_link_change()
330 bp->speed = phydev->speed; in macb_handle_link_change()
331 bp->duplex = phydev->duplex; in macb_handle_link_change()
336 if (phydev->link != bp->link) { in macb_handle_link_change()
338 bp->speed = 0; in macb_handle_link_change()
339 bp->duplex = -1; in macb_handle_link_change()
341 bp->link = phydev->link; in macb_handle_link_change()
346 spin_unlock_irqrestore(&bp->lock, flags); in macb_handle_link_change()
353 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); in macb_handle_link_change()
370 struct macb *bp = netdev_priv(dev); in macb_mii_probe() local
376 phydev = phy_find_first(bp->mii_bus); in macb_mii_probe()
382 pdata = dev_get_platdata(&bp->pdev->dev); in macb_mii_probe()
384 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); in macb_mii_probe()
393 bp->phy_interface); in macb_mii_probe()
400 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) in macb_mii_probe()
405 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) in macb_mii_probe()
410 bp->link = 0; in macb_mii_probe()
411 bp->speed = 0; in macb_mii_probe()
412 bp->duplex = -1; in macb_mii_probe()
413 bp->phy_dev = phydev; in macb_mii_probe()
418 static int macb_mii_init(struct macb *bp) in macb_mii_init() argument
425 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_mii_init()
427 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
428 if (bp->mii_bus == NULL) { in macb_mii_init()
433 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
434 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
435 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
436 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
437 bp->pdev->name, bp->pdev->id); in macb_mii_init()
438 bp->mii_bus->priv = bp; in macb_mii_init()
439 bp->mii_bus->parent = &bp->dev->dev; in macb_mii_init()
440 pdata = dev_get_platdata(&bp->pdev->dev); in macb_mii_init()
442 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); in macb_mii_init()
443 if (!bp->mii_bus->irq) { in macb_mii_init()
448 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
450 np = bp->pdev->dev.of_node; in macb_mii_init()
453 err = of_mdiobus_register(bp->mii_bus, np); in macb_mii_init()
457 if (!err && !phy_find_first(bp->mii_bus)) { in macb_mii_init()
461 phydev = mdiobus_scan(bp->mii_bus, i); in macb_mii_init()
473 bp->mii_bus->irq[i] = PHY_POLL; in macb_mii_init()
476 bp->mii_bus->phy_mask = pdata->phy_mask; in macb_mii_init()
478 err = mdiobus_register(bp->mii_bus); in macb_mii_init()
484 err = macb_mii_probe(bp->dev); in macb_mii_init()
491 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
493 kfree(bp->mii_bus->irq); in macb_mii_init()
495 mdiobus_free(bp->mii_bus); in macb_mii_init()
500 static void macb_update_stats(struct macb *bp) in macb_update_stats() argument
502 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
509 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
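macb_update_stats() depends on two parallel layouts: the hardware statistics registers are contiguous, and so are the u32 counters in bp->hw_stats.macb, so a single loop can walk a pointer from rx_pause_frames to tx_pause_frames while stepping the register offset in lockstep. A sketch of that parallel-walk idiom over a stand-in struct and register file:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the contiguous counter block in struct macb_stats:
 * the first and last members bracket everything in between. */
struct hw_stats {
        uint32_t rx_pause_frames;
        uint32_t rx_packets;
        uint32_t tx_packets;
        uint32_t tx_pause_frames;
};

/* Pretend register file: one 32-bit counter per statistics register. */
static const uint32_t fake_regs[] = { 1, 200, 150, 3 };

int main(void)
{
        struct hw_stats stats = { 0, 0, 0, 0 };
        uint32_t *p = &stats.rx_pause_frames;
        uint32_t *end = &stats.tx_pause_frames + 1;
        unsigned i = 0;

        /* Same shape as the driver loop: bump each struct counter by
         * the matching hardware register, in declaration order. */
        for (; p < end; p++, i++)
                *p += fake_regs[i];

        printf("rx_packets=%u tx_packets=%u\n",
               (unsigned)stats.rx_packets, (unsigned)stats.tx_packets);
        return 0;
}
```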
512 static int macb_halt_tx(struct macb *bp) in macb_halt_tx() argument
517 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); in macb_halt_tx()
522 status = macb_readl(bp, TSR); in macb_halt_tx()
532 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) in macb_tx_unmap() argument
536 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
539 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
554 struct macb *bp = queue->bp; in macb_tx_error_task() local
561 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
562 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
571 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
574 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
581 if (macb_halt_tx(bp)) in macb_tx_error_task()
583 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
600 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
610 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
612 bp->stats.tx_packets++; in macb_tx_error_task()
613 bp->stats.tx_bytes += skb->len; in macb_tx_error_task()
622 netdev_err(bp->dev, in macb_tx_error_task()
628 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
646 macb_writel(bp, TSR, macb_readl(bp, TSR)); in macb_tx_error_task()
650 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
651 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_error_task()
653 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
661 struct macb *bp = queue->bp; in macb_tx_interrupt() local
662 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
664 status = macb_readl(bp, TSR); in macb_tx_interrupt()
665 macb_writel(bp, TSR, status); in macb_tx_interrupt()
667 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
670 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
700 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
702 bp->stats.tx_packets++; in macb_tx_interrupt()
703 bp->stats.tx_bytes += skb->len; in macb_tx_interrupt()
707 macb_tx_unmap(bp, tx_skb); in macb_tx_interrupt()
719 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
722 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
725 static void gem_rx_refill(struct macb *bp) in gem_rx_refill() argument
731 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { in gem_rx_refill()
732 entry = macb_rx_ring_wrap(bp->rx_prepared_head); in gem_rx_refill()
737 bp->rx_prepared_head++; in gem_rx_refill()
739 if (bp->rx_skbuff[entry] == NULL) { in gem_rx_refill()
741 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
743 netdev_err(bp->dev, in gem_rx_refill()
749 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
750 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx_refill()
751 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
756 bp->rx_skbuff[entry] = skb; in gem_rx_refill()
760 bp->rx_ring[entry].addr = paddr; in gem_rx_refill()
761 bp->rx_ring[entry].ctrl = 0; in gem_rx_refill()
766 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); in gem_rx_refill()
767 bp->rx_ring[entry].ctrl = 0; in gem_rx_refill()
774 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", in gem_rx_refill()
775 bp->rx_prepared_head, bp->rx_tail); in gem_rx_refill()
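gem_rx_refill() is the producer half of the RX ring and uses CIRC_SPACE() from <linux/circ_buf.h> to decide how many slots it may fill before catching up with the consumer (bp->rx_tail); one slot is always left empty so a full ring stays distinguishable from an empty one. A sketch of the CIRC_SPACE() arithmetic with a small power-of-two ring:

```c
#include <assert.h>

/* Same definition as include/linux/circ_buf.h (size must be a
 * power of two). */
#define CIRC_SPACE(head, tail, size) \
        (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
        unsigned head = 0, tail = 0;

        /* Empty ring: the producer may fill all slots but one; the
         * gap keeps "full" distinguishable from "empty". */
        assert(CIRC_SPACE(head, tail, 8) == 7);

        head = 7;              /* producer has prepared 7 buffers */
        assert(CIRC_SPACE(head, tail, 8) == 0);   /* ring is now full */

        tail = 3;              /* consumer handed 3 buffers to the stack */
        assert(CIRC_SPACE(head, tail, 8) == 3);   /* slots freed for refill */
        return 0;
}
```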
779 static void discard_partial_frame(struct macb *bp, unsigned int begin, in discard_partial_frame() argument
785 struct macb_dma_desc *desc = macb_rx_desc(bp, frag); in discard_partial_frame()
799 static int gem_rx(struct macb *bp, int budget) in gem_rx() argument
810 entry = macb_rx_ring_wrap(bp->rx_tail); in gem_rx()
811 desc = &bp->rx_ring[entry]; in gem_rx()
822 bp->rx_tail++; in gem_rx()
826 netdev_err(bp->dev, in gem_rx()
828 bp->stats.rx_dropped++; in gem_rx()
831 skb = bp->rx_skbuff[entry]; in gem_rx()
833 netdev_err(bp->dev, in gem_rx()
835 bp->stats.rx_dropped++; in gem_rx()
839 bp->rx_skbuff[entry] = NULL; in gem_rx()
840 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
842 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
846 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
847 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
849 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
851 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
852 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
856 bp->stats.rx_packets++; in gem_rx()
857 bp->stats.rx_bytes += skb->len; in gem_rx()
860 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
871 gem_rx_refill(bp); in gem_rx()
876 static int macb_rx_frame(struct macb *bp, unsigned int first_frag, in macb_rx_frame() argument
885 desc = macb_rx_desc(bp, last_frag); in macb_rx_frame()
886 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
888 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
901 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
903 bp->stats.rx_dropped++; in macb_rx_frame()
905 desc = macb_rx_desc(bp, frag); in macb_rx_frame()
923 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
930 macb_rx_buffer(bp, frag), frag_len); in macb_rx_frame()
931 offset += bp->rx_buffer_size; in macb_rx_frame()
932 desc = macb_rx_desc(bp, frag); in macb_rx_frame()
943 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
945 bp->stats.rx_packets++; in macb_rx_frame()
946 bp->stats.rx_bytes += skb->len; in macb_rx_frame()
947 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
954 static int macb_rx(struct macb *bp, int budget) in macb_rx() argument
960 for (tail = bp->rx_tail; budget > 0; tail++) { in macb_rx()
961 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); in macb_rx()
975 discard_partial_frame(bp, first_frag, tail); in macb_rx()
983 dropped = macb_rx_frame(bp, first_frag, tail); in macb_rx()
993 bp->rx_tail = first_frag; in macb_rx()
995 bp->rx_tail = tail; in macb_rx()
1002 struct macb *bp = container_of(napi, struct macb, napi); in macb_poll() local
1006 status = macb_readl(bp, RSR); in macb_poll()
1007 macb_writel(bp, RSR, status); in macb_poll()
1011 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1014 work_done = bp->macbgem_ops.mog_rx(bp, budget); in macb_poll()
1019 status = macb_readl(bp, RSR); in macb_poll()
1021 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1022 macb_writel(bp, ISR, MACB_BIT(RCOMP)); in macb_poll()
1025 macb_writel(bp, IER, MACB_RX_INT_FLAGS); in macb_poll()
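macb_poll() follows the standard NAPI contract: consume at most `budget` packets via mog_rx(), and only when less than the whole budget was used complete polling and re-enable RX interrupts (re-reading RSR first, as above, to close the race with packets that landed in between). A sketch of that contract with stand-in rx and interrupt-enable hooks:

```c
#include <stdio.h>

static int fake_rx(int budget)
{
        /* Pretend only 3 packets were pending. */
        return budget < 3 ? budget : 3;
}

static void fake_irq_enable(void)
{
        printf("RX interrupts re-enabled\n");
}

/* NAPI-style poll contract: consume up to `budget`, and only when
 * less than the budget was used drop back to interrupt mode. The
 * driver's macb_poll() has this shape around mog_rx(). */
static int poll(int budget)
{
        int work_done = fake_rx(budget);

        if (work_done < budget)
                fake_irq_enable();
        return work_done;
}

int main(void)
{
        printf("work_done = %d\n", poll(64));
        return 0;
}
```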
1037 struct macb *bp = queue->bp; in macb_interrupt() local
1038 struct net_device *dev = bp->dev; in macb_interrupt()
1046 spin_lock(&bp->lock); in macb_interrupt()
1055 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1056 (unsigned int)(queue - bp->queues), in macb_interrupt()
1068 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1071 if (napi_schedule_prep(&bp->napi)) { in macb_interrupt()
1072 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1073 __napi_schedule(&bp->napi); in macb_interrupt()
1081 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1102 ctrl = macb_readl(bp, NCR); in macb_interrupt()
1103 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_interrupt()
1104 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_interrupt()
1106 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1107 macb_writel(bp, ISR, MACB_BIT(RXUBR)); in macb_interrupt()
1112 if (macb_is_gem(bp)) in macb_interrupt()
1113 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1115 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1117 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1129 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1136 spin_unlock(&bp->lock); in macb_interrupt()
1148 struct macb *bp = netdev_priv(dev); in macb_poll_controller() local
1154 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1160 static unsigned int macb_tx_map(struct macb *bp, in macb_tx_map() argument
1177 size = min(len, bp->max_tx_length); in macb_tx_map()
1181 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1184 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1206 size = min(len, bp->max_tx_length); in macb_tx_map()
1210 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1212 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1230 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
1278 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
1283 macb_tx_unmap(bp, tx_skb); in macb_tx_map()
1292 struct macb *bp = netdev_priv(dev); in macb_start_xmit() local
1293 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
1298 netdev_vdbg(bp->dev, in macb_start_xmit()
1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
1317 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
1322 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
1323 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
1329 if (!macb_tx_map(bp, queue, skb)) { in macb_start_xmit()
1339 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_start_xmit()
1345 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
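Before taking bp->lock, macb_start_xmit() computes how many descriptors the skb will occupy: the linear head and every paged fragment are split into chunks of at most bp->max_tx_length, so each piece contributes DIV_ROUND_UP(len, max_tx_length) descriptors. A sketch of that sizing with an illustrative maximum (the real GEM_MAX_TX_LEN/MACB_MAX_TX_LEN values differ):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const unsigned max_tx_length = 2048;    /* illustrative chunk limit */
        unsigned head_len = 1514;               /* linear part of the skb */
        unsigned frag_lens[] = { 4096, 100 };   /* two paged fragments */
        unsigned count, i;

        count = DIV_ROUND_UP(head_len, max_tx_length);
        for (i = 0; i < 2; i++)
                count += DIV_ROUND_UP(frag_lens[i], max_tx_length);

        /* 1 + 2 + 1 descriptors must be free before mapping starts. */
        printf("descriptors needed: %u\n", count);
        return 0;
}
```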
1350 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) in macb_init_rx_buffer_size() argument
1352 if (!macb_is_gem(bp)) { in macb_init_rx_buffer_size()
1353 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
1355 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
1357 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
1358 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
1361 bp->rx_buffer_size = in macb_init_rx_buffer_size()
1362 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
1366 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", in macb_init_rx_buffer_size()
1367 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
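On GEM the receive buffer size is programmable but must be a multiple of RX_BUFFER_MULTIPLE, so macb_init_rx_buffer_size() rounds the MTU-derived size up; plain MACB always uses the fixed MACB_RX_BUFFER_SIZE. A sketch of the roundup() arithmetic, assuming the driver's 64-byte multiple:

```c
#include <assert.h>

#define RX_BUFFER_MULTIPLE 64   /* bytes, as in the driver */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        /* 1536 is already aligned; an odd MTU-derived size is not. */
        assert(roundup(1536, RX_BUFFER_MULTIPLE) == 1536);
        assert(roundup(1522, RX_BUFFER_MULTIPLE) == 1536);
        return 0;
}
```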
1370 static void gem_free_rx_buffers(struct macb *bp) in gem_free_rx_buffers() argument
1377 if (!bp->rx_skbuff) in gem_free_rx_buffers()
1381 skb = bp->rx_skbuff[i]; in gem_free_rx_buffers()
1386 desc = &bp->rx_ring[i]; in gem_free_rx_buffers()
1388 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
1394 kfree(bp->rx_skbuff); in gem_free_rx_buffers()
1395 bp->rx_skbuff = NULL; in gem_free_rx_buffers()
1398 static void macb_free_rx_buffers(struct macb *bp) in macb_free_rx_buffers() argument
1400 if (bp->rx_buffers) { in macb_free_rx_buffers()
1401 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
1402 RX_RING_SIZE * bp->rx_buffer_size, in macb_free_rx_buffers()
1403 bp->rx_buffers, bp->rx_buffers_dma); in macb_free_rx_buffers()
1404 bp->rx_buffers = NULL; in macb_free_rx_buffers()
1408 static void macb_free_consistent(struct macb *bp) in macb_free_consistent() argument
1413 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
1414 if (bp->rx_ring) { in macb_free_consistent()
1415 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, in macb_free_consistent()
1416 bp->rx_ring, bp->rx_ring_dma); in macb_free_consistent()
1417 bp->rx_ring = NULL; in macb_free_consistent()
1420 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
1424 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, in macb_free_consistent()
1431 static int gem_alloc_rx_buffers(struct macb *bp) in gem_alloc_rx_buffers() argument
1436 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); in gem_alloc_rx_buffers()
1437 if (!bp->rx_skbuff) in gem_alloc_rx_buffers()
1440 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
1442 RX_RING_SIZE, bp->rx_skbuff); in gem_alloc_rx_buffers()
1446 static int macb_alloc_rx_buffers(struct macb *bp) in macb_alloc_rx_buffers() argument
1450 size = RX_RING_SIZE * bp->rx_buffer_size; in macb_alloc_rx_buffers()
1451 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
1452 &bp->rx_buffers_dma, GFP_KERNEL); in macb_alloc_rx_buffers()
1453 if (!bp->rx_buffers) in macb_alloc_rx_buffers()
1456 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
1458 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); in macb_alloc_rx_buffers()
1462 static int macb_alloc_consistent(struct macb *bp) in macb_alloc_consistent() argument
1468 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
1470 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
1475 netdev_dbg(bp->dev, in macb_alloc_consistent()
1487 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
1488 &bp->rx_ring_dma, GFP_KERNEL); in macb_alloc_consistent()
1489 if (!bp->rx_ring) in macb_alloc_consistent()
1491 netdev_dbg(bp->dev, in macb_alloc_consistent()
1493 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); in macb_alloc_consistent()
1495 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
1501 macb_free_consistent(bp); in macb_alloc_consistent()
1505 static void gem_init_rings(struct macb *bp) in gem_init_rings() argument
1511 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
1521 bp->rx_tail = 0; in gem_init_rings()
1522 bp->rx_prepared_head = 0; in gem_init_rings()
1524 gem_rx_refill(bp); in gem_init_rings()
1527 static void macb_init_rings(struct macb *bp) in macb_init_rings() argument
1532 addr = bp->rx_buffers_dma; in macb_init_rings()
1534 bp->rx_ring[i].addr = addr; in macb_init_rings()
1535 bp->rx_ring[i].ctrl = 0; in macb_init_rings()
1536 addr += bp->rx_buffer_size; in macb_init_rings()
1538 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); in macb_init_rings()
1541 bp->queues[0].tx_ring[i].addr = 0; in macb_init_rings()
1542 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); in macb_init_rings()
1544 bp->queues[0].tx_head = 0; in macb_init_rings()
1545 bp->queues[0].tx_tail = 0; in macb_init_rings()
1546 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); in macb_init_rings()
1548 bp->rx_tail = 0; in macb_init_rings()
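macb_init_rings() also shows how the hardware learns where the ring ends: every descriptor is initialized, and only the last one gets the WRAP bit (RX_WRAP or TX_WRAP), which makes the DMA engine jump back to the ring base instead of running past the end. A sketch of that termination pattern; the bit positions are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define BIT_WRAP  (1u << 1)   /* illustrative bit position */
#define BIT_USED  (1u << 0)   /* illustrative bit position */

struct dma_desc {
        uint32_t addr;
        uint32_t ctrl;
};

int main(void)
{
        static struct dma_desc tx_ring[RING_SIZE];
        unsigned i;

        /* All TX descriptors start out owned by software (USED set),
         * and only the last one carries the wrap marker. */
        for (i = 0; i < RING_SIZE; i++) {
                tx_ring[i].addr = 0;
                tx_ring[i].ctrl = BIT_USED;
        }
        tx_ring[RING_SIZE - 1].ctrl |= BIT_WRAP;

        printf("last ctrl = 0x%x\n", (unsigned)tx_ring[RING_SIZE - 1].ctrl);
        return 0;
}
```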
1551 static void macb_reset_hw(struct macb *bp) in macb_reset_hw() argument
1560 macb_writel(bp, NCR, 0); in macb_reset_hw()
1563 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); in macb_reset_hw()
1566 macb_writel(bp, TSR, -1); in macb_reset_hw()
1567 macb_writel(bp, RSR, -1); in macb_reset_hw()
1570 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
1576 static u32 gem_mdc_clk_div(struct macb *bp) in gem_mdc_clk_div() argument
1579 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
1597 static u32 macb_mdc_clk_div(struct macb *bp) in macb_mdc_clk_div() argument
1602 if (macb_is_gem(bp)) in macb_mdc_clk_div()
1603 return gem_mdc_clk_div(bp); in macb_mdc_clk_div()
1605 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
1623 static u32 macb_dbw(struct macb *bp) in macb_dbw() argument
1625 if (!macb_is_gem(bp)) in macb_dbw()
1628 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { in macb_dbw()
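Both divider helpers translate the measured pclk rate into the smallest NCFGR MDC divider that keeps the management clock within the IEEE 802.3 limit of 2.5 MHz, and macb_dbw() does a similar table lookup from the DCFG1 bus-width field. A sketch of the threshold cascade, using MACB-style 20/40/80 MHz steps (the GEM variant has more steps and larger dividers):

```c
#include <stdio.h>

/* Illustrative encodings; the driver uses MACB_CLK_DIV8..DIV64. */
enum mdc_div { CLK_DIV8, CLK_DIV16, CLK_DIV32, CLK_DIV64 };

static enum mdc_div mdc_clk_div(unsigned long pclk_hz)
{
        /* Pick the smallest divider that keeps MDC <= 2.5 MHz. */
        if (pclk_hz <= 20000000)
                return CLK_DIV8;
        else if (pclk_hz <= 40000000)
                return CLK_DIV16;
        else if (pclk_hz <= 80000000)
                return CLK_DIV32;
        return CLK_DIV64;
}

int main(void)
{
        printf("div for 133 MHz pclk: %d\n", mdc_clk_div(133000000));
        return 0;
}
```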
1647 static void macb_configure_dma(struct macb *bp) in macb_configure_dma() argument
1651 if (macb_is_gem(bp)) { in macb_configure_dma()
1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
1653 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); in macb_configure_dma()
1654 if (bp->dma_burst_length) in macb_configure_dma()
1655 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
1659 if (bp->native_io) in macb_configure_dma()
1664 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
1668 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
1670 gem_writel(bp, DMACFG, dmacfg); in macb_configure_dma()
1674 static void macb_init_hw(struct macb *bp) in macb_init_hw() argument
1681 macb_reset_hw(bp); in macb_init_hw()
1682 macb_set_hwaddr(bp); in macb_init_hw()
1684 config = macb_mdc_clk_div(bp); in macb_init_hw()
1685 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init_hw()
1690 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
1694 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
1696 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
1698 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
1700 config |= macb_dbw(bp); in macb_init_hw()
1701 macb_writel(bp, NCFGR, config); in macb_init_hw()
1702 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
1703 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
1704 bp->speed = SPEED_10; in macb_init_hw()
1705 bp->duplex = DUPLEX_HALF; in macb_init_hw()
1706 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
1707 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
1708 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
1710 macb_configure_dma(bp); in macb_init_hw()
1713 macb_writel(bp, RBQP, bp->rx_ring_dma); in macb_init_hw()
1714 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_hw()
1725 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); in macb_init_hw()
1795 struct macb *bp = netdev_priv(dev); in macb_sethashtable() local
1804 macb_or_gem_writel(bp, HRB, mc_filter[0]); in macb_sethashtable()
1805 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
1814 struct macb *bp = netdev_priv(dev); in macb_set_rx_mode() local
1816 cfg = macb_readl(bp, NCFGR); in macb_set_rx_mode()
1823 if (macb_is_gem(bp)) in macb_set_rx_mode()
1830 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
1836 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
1837 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
1845 macb_or_gem_writel(bp, HRB, 0); in macb_set_rx_mode()
1846 macb_or_gem_writel(bp, HRT, 0); in macb_set_rx_mode()
1850 macb_writel(bp, NCFGR, cfg); in macb_set_rx_mode()
1855 struct macb *bp = netdev_priv(dev); in macb_open() local
1859 netdev_dbg(bp->dev, "open\n"); in macb_open()
1865 if (!bp->phy_dev) in macb_open()
1869 macb_init_rx_buffer_size(bp, bufsz); in macb_open()
1871 err = macb_alloc_consistent(bp); in macb_open()
1878 napi_enable(&bp->napi); in macb_open()
1880 bp->macbgem_ops.mog_init_rings(bp); in macb_open()
1881 macb_init_hw(bp); in macb_open()
1884 phy_start(bp->phy_dev); in macb_open()
1893 struct macb *bp = netdev_priv(dev); in macb_close() local
1897 napi_disable(&bp->napi); in macb_close()
1899 if (bp->phy_dev) in macb_close()
1900 phy_stop(bp->phy_dev); in macb_close()
1902 spin_lock_irqsave(&bp->lock, flags); in macb_close()
1903 macb_reset_hw(bp); in macb_close()
1905 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
1907 macb_free_consistent(bp); in macb_close()
1914 struct macb *bp = netdev_priv(dev); in macb_change_mtu() local
1921 if (bp->caps & MACB_CAPS_JUMBO) in macb_change_mtu()
1922 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_change_mtu()
1932 static void gem_update_stats(struct macb *bp) in gem_update_stats() argument
1935 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
1939 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
1941 bp->ethtool_stats[i] += val; in gem_update_stats()
1946 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
1947 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
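Several GEM counters are 64 bits wide and exposed as two 32-bit registers: gem_update_stats() reads the low word at `offset`, then the high word at `offset + 4`, and accumulates low | (high << 32) into bp->ethtool_stats. A sketch of that combine:

```c
#include <assert.h>
#include <stdint.h>

/* Combine a split 64-bit hardware counter the way the driver does:
 * low word at `offset`, high word at `offset + 4`. */
static uint64_t combine(uint32_t lo, uint32_t hi)
{
        return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
        assert(combine(0xffffffffu, 0) == 0xffffffffull);
        assert(combine(0, 1) == 0x100000000ull);   /* carry into the high word */
        return 0;
}
```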
1953 static struct net_device_stats *gem_get_stats(struct macb *bp) in gem_get_stats() argument
1955 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
1956 struct net_device_stats *nstat = &bp->stats; in gem_get_stats()
1958 gem_update_stats(bp); in gem_get_stats()
1994 struct macb *bp; in gem_get_ethtool_stats() local
1996 bp = netdev_priv(dev); in gem_get_ethtool_stats()
1997 gem_update_stats(bp); in gem_get_ethtool_stats()
1998 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); in gem_get_ethtool_stats()
2026 struct macb *bp = netdev_priv(dev); in macb_get_stats() local
2027 struct net_device_stats *nstat = &bp->stats; in macb_get_stats()
2028 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
2030 if (macb_is_gem(bp)) in macb_get_stats()
2031 return gem_get_stats(bp); in macb_get_stats()
2034 macb_update_stats(bp); in macb_get_stats()
2073 struct macb *bp = netdev_priv(dev); in macb_get_settings() local
2074 struct phy_device *phydev = bp->phy_dev; in macb_get_settings()
2084 struct macb *bp = netdev_priv(dev); in macb_set_settings() local
2085 struct phy_device *phydev = bp->phy_dev; in macb_set_settings()
2101 struct macb *bp = netdev_priv(dev); in macb_get_regs() local
2105 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
2108 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); in macb_get_regs()
2109 head = macb_tx_ring_wrap(bp->queues[0].tx_head); in macb_get_regs()
2111 regs_buff[0] = macb_readl(bp, NCR); in macb_get_regs()
2112 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
2113 regs_buff[2] = macb_readl(bp, NSR); in macb_get_regs()
2114 regs_buff[3] = macb_readl(bp, TSR); in macb_get_regs()
2115 regs_buff[4] = macb_readl(bp, RBQP); in macb_get_regs()
2116 regs_buff[5] = macb_readl(bp, TBQP); in macb_get_regs()
2117 regs_buff[6] = macb_readl(bp, RSR); in macb_get_regs()
2118 regs_buff[7] = macb_readl(bp, IMR); in macb_get_regs()
2122 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
2123 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
2125 regs_buff[12] = macb_or_gem_readl(bp, USRIO); in macb_get_regs()
2126 if (macb_is_gem(bp)) { in macb_get_regs()
2127 regs_buff[13] = gem_readl(bp, DMACFG); in macb_get_regs()
2154 struct macb *bp = netdev_priv(dev); in macb_ioctl() local
2155 struct phy_device *phydev = bp->phy_dev; in macb_ioctl()
2169 struct macb *bp = netdev_priv(netdev); in macb_set_features() local
2173 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { in macb_set_features()
2176 dmacfg = gem_readl(bp, DMACFG); in macb_set_features()
2181 gem_writel(bp, DMACFG, dmacfg); in macb_set_features()
2185 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { in macb_set_features()
2188 netcfg = gem_readl(bp, NCFGR); in macb_set_features()
2194 gem_writel(bp, NCFGR, netcfg); in macb_set_features()
2220 static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) in macb_configure_caps() argument
2225 bp->caps = dt_conf->caps; in macb_configure_caps()
2227 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
2228 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
2230 dcfg = gem_readl(bp, DCFG1); in macb_configure_caps()
2232 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
2233 dcfg = gem_readl(bp, DCFG2); in macb_configure_caps()
2235 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
2238 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
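macb_configure_caps() layers dynamic hardware discovery on top of the static capability mask from the matched configuration: if the silicon identifies as a GEM, it sets MACB_CAPS_MACB_IS_GEM and inspects the DCFG design-configuration registers for features such as clear-on-write interrupt status and FIFO mode. A sketch of the accumulate-then-probe pattern; the capability bit values are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative capability bits; the real values live in macb.h. */
#define CAPS_IS_GEM              (1u << 0)
#define CAPS_ISR_CLEAR_ON_WRITE  (1u << 1)
#define CAPS_FIFO_MODE           (1u << 2)

static uint32_t configure_caps(uint32_t dt_caps, int hw_is_gem,
                               int irq_clear_on_read)
{
        uint32_t caps = dt_caps;              /* static, from the match table */

        if (hw_is_gem) {                      /* dynamic, from the silicon */
                caps |= CAPS_IS_GEM;
                /* IRQCOR == 0 in DCFG1 means status must be cleared by
                 * writing it back, hence this capability bit. */
                if (!irq_clear_on_read)
                        caps |= CAPS_ISR_CLEAR_ON_WRITE;
        }
        return caps;
}

int main(void)
{
        printf("caps = 0x%x\n",
               (unsigned)configure_caps(CAPS_FIFO_MODE, 1, 0));
        return 0;
}
```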
2326 struct macb *bp = netdev_priv(dev); in macb_init() local
2336 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
2339 queue = &bp->queues[q]; in macb_init()
2340 queue->bp = bp; in macb_init()
2376 netif_napi_add(dev, &bp->napi, macb_poll, 64); in macb_init()
2379 if (macb_is_gem(bp)) { in macb_init()
2380 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
2381 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
2382 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
2383 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
2384 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
2387 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
2388 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
2389 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
2390 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
2391 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
2398 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
2400 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
2405 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) in macb_init()
2407 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
2408 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) in macb_init()
2410 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) in macb_init()
2413 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
2416 macb_or_gem_writel(bp, USRIO, val); in macb_init()
2419 val = macb_mdc_clk_div(bp); in macb_init()
2420 val |= macb_dbw(bp); in macb_init()
2421 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
2423 macb_writel(bp, NCFGR, val); in macb_init()
2717 struct macb *bp = netdev_priv(dev); in at91ether_init() local
2729 macb_writel(bp, NCR, 0); in at91ether_init()
2732 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) in at91ether_init()
2735 macb_writel(bp, NCFGR, reg); in at91ether_init()
2830 struct macb *bp; in macb_probe() local
2856 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); in macb_probe()
2866 bp = netdev_priv(dev); in macb_probe()
2867 bp->pdev = pdev; in macb_probe()
2868 bp->dev = dev; in macb_probe()
2869 bp->regs = mem; in macb_probe()
2870 bp->native_io = native_io; in macb_probe()
2872 bp->macb_reg_readl = hw_readl_native; in macb_probe()
2873 bp->macb_reg_writel = hw_writel_native; in macb_probe()
2875 bp->macb_reg_readl = hw_readl; in macb_probe()
2876 bp->macb_reg_writel = hw_writel; in macb_probe()
2878 bp->num_queues = num_queues; in macb_probe()
2879 bp->queue_mask = queue_mask; in macb_probe()
2881 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
2882 bp->pclk = pclk; in macb_probe()
2883 bp->hclk = hclk; in macb_probe()
2884 bp->tx_clk = tx_clk; in macb_probe()
2886 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
2888 spin_lock_init(&bp->lock); in macb_probe()
2891 macb_configure_caps(bp, macb_config); in macb_probe()
2903 memcpy(bp->dev->dev_addr, mac, ETH_ALEN); in macb_probe()
2905 macb_get_hwaddr(bp); in macb_probe()
2911 bp->phy_interface = PHY_INTERFACE_MODE_RMII; in macb_probe()
2913 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
2915 bp->phy_interface = err; in macb_probe()
2929 err = macb_mii_init(bp); in macb_probe()
2936 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), in macb_probe()
2939 phydev = bp->phy_dev; in macb_probe()
2962 struct macb *bp; in macb_remove() local
2967 bp = netdev_priv(dev); in macb_remove()
2968 if (bp->phy_dev) in macb_remove()
2969 phy_disconnect(bp->phy_dev); in macb_remove()
2970 mdiobus_unregister(bp->mii_bus); in macb_remove()
2971 kfree(bp->mii_bus->irq); in macb_remove()
2972 mdiobus_free(bp->mii_bus); in macb_remove()
2974 clk_disable_unprepare(bp->tx_clk); in macb_remove()
2975 clk_disable_unprepare(bp->hclk); in macb_remove()
2976 clk_disable_unprepare(bp->pclk); in macb_remove()
2987 struct macb *bp = netdev_priv(netdev); in macb_suspend() local
2992 clk_disable_unprepare(bp->tx_clk); in macb_suspend()
2993 clk_disable_unprepare(bp->hclk); in macb_suspend()
2994 clk_disable_unprepare(bp->pclk); in macb_suspend()
3003 struct macb *bp = netdev_priv(netdev); in macb_resume() local
3005 clk_prepare_enable(bp->pclk); in macb_resume()
3006 clk_prepare_enable(bp->hclk); in macb_resume()
3007 clk_prepare_enable(bp->tx_clk); in macb_resume()