1/*
2 * Cadence MACB/GEM Ethernet Controller driver
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12#include <linux/clk.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/circ_buf.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/interrupt.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/dma-mapping.h>
26#include <linux/platform_data/macb.h>
27#include <linux/platform_device.h>
28#include <linux/phy.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/of_mdio.h>
32#include <linux/of_net.h>
33
34#include "macb.h"
35
36#define MACB_RX_BUFFER_SIZE	128
37#define RX_BUFFER_MULTIPLE	64  /* bytes */
38#define RX_RING_SIZE		512 /* must be power of 2 */
39#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
40
41#define TX_RING_SIZE		128 /* must be power of 2 */
42#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
43
44/* level of occupied TX descriptors under which we wake up TX process */
45#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
46
47#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
48				 | MACB_BIT(ISR_ROVR))
49#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
50					| MACB_BIT(ISR_RLE)		\
51					| MACB_BIT(TXERR))
52#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
53
54#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
55#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
56
57/*
58 * Graceful stop timeouts in us. We should allow up to
59 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
60 */
61#define MACB_HALT_TIMEOUT	1230
62
63/* Ring buffer accessors */
64static unsigned int macb_tx_ring_wrap(unsigned int index)
65{
66	return index & (TX_RING_SIZE - 1);
67}
68
69static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
70					  unsigned int index)
71{
72	return &queue->tx_ring[macb_tx_ring_wrap(index)];
73}
74
75static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
76				       unsigned int index)
77{
78	return &queue->tx_skb[macb_tx_ring_wrap(index)];
79}
80
81static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
82{
83	dma_addr_t offset;
84
85	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
86
87	return queue->tx_ring_dma + offset;
88}
89
90static unsigned int macb_rx_ring_wrap(unsigned int index)
91{
92	return index & (RX_RING_SIZE - 1);
93}
94
95static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
96{
97	return &bp->rx_ring[macb_rx_ring_wrap(index)];
98}
99
100static void *macb_rx_buffer(struct macb *bp, unsigned int index)
101{
102	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
103}
104
105static void macb_set_hwaddr(struct macb *bp)
106{
107	u32 bottom;
108	u16 top;
109
110	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
111	macb_or_gem_writel(bp, SA1B, bottom);
112	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
113	macb_or_gem_writel(bp, SA1T, top);
114
115	/* Clear unused address register sets */
116	macb_or_gem_writel(bp, SA2B, 0);
117	macb_or_gem_writel(bp, SA2T, 0);
118	macb_or_gem_writel(bp, SA3B, 0);
119	macb_or_gem_writel(bp, SA3T, 0);
120	macb_or_gem_writel(bp, SA4B, 0);
121	macb_or_gem_writel(bp, SA4T, 0);
122}
123
124static void macb_get_hwaddr(struct macb *bp)
125{
126	struct macb_platform_data *pdata;
127	u32 bottom;
128	u16 top;
129	u8 addr[6];
130	int i;
131
132	pdata = dev_get_platdata(&bp->pdev->dev);
133
134	/* Check all 4 address register for vaild address */
135	for (i = 0; i < 4; i++) {
136		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
137		top = macb_or_gem_readl(bp, SA1T + i * 8);
138
139		if (pdata && pdata->rev_eth_addr) {
140			addr[5] = bottom & 0xff;
141			addr[4] = (bottom >> 8) & 0xff;
142			addr[3] = (bottom >> 16) & 0xff;
143			addr[2] = (bottom >> 24) & 0xff;
144			addr[1] = top & 0xff;
145			addr[0] = (top & 0xff00) >> 8;
146		} else {
147			addr[0] = bottom & 0xff;
148			addr[1] = (bottom >> 8) & 0xff;
149			addr[2] = (bottom >> 16) & 0xff;
150			addr[3] = (bottom >> 24) & 0xff;
151			addr[4] = top & 0xff;
152			addr[5] = (top >> 8) & 0xff;
153		}
154
155		if (is_valid_ether_addr(addr)) {
156			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
157			return;
158		}
159	}
160
161	netdev_info(bp->dev, "invalid hw address, using random\n");
162	eth_hw_addr_random(bp->dev);
163}
164
165static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
166{
167	struct macb *bp = bus->priv;
168	int value;
169
170	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
171			      | MACB_BF(RW, MACB_MAN_READ)
172			      | MACB_BF(PHYA, mii_id)
173			      | MACB_BF(REGA, regnum)
174			      | MACB_BF(CODE, MACB_MAN_CODE)));
175
176	/* wait for end of transfer */
177	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
178		cpu_relax();
179
180	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
181
182	return value;
183}
184
185static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
186			   u16 value)
187{
188	struct macb *bp = bus->priv;
189
190	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
191			      | MACB_BF(RW, MACB_MAN_WRITE)
192			      | MACB_BF(PHYA, mii_id)
193			      | MACB_BF(REGA, regnum)
194			      | MACB_BF(CODE, MACB_MAN_CODE)
195			      | MACB_BF(DATA, value)));
196
197	/* wait for end of transfer */
198	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
199		cpu_relax();
200
201	return 0;
202}
203
204/**
205 * macb_set_tx_clk() - Set a clock to a new frequency
206 * @clk		Pointer to the clock to change
207 * @rate	New frequency in Hz
208 * @dev		Pointer to the struct net_device
209 */
210static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
211{
212	long ferr, rate, rate_rounded;
213
214	if (!clk)
215		return;
216
217	switch (speed) {
218	case SPEED_10:
219		rate = 2500000;
220		break;
221	case SPEED_100:
222		rate = 25000000;
223		break;
224	case SPEED_1000:
225		rate = 125000000;
226		break;
227	default:
228		return;
229	}
230
231	rate_rounded = clk_round_rate(clk, rate);
232	if (rate_rounded < 0)
233		return;
234
235	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
236	 * is not satisfied.
237	 */
238	ferr = abs(rate_rounded - rate);
239	ferr = DIV_ROUND_UP(ferr, rate / 100000);
240	if (ferr > 5)
241		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
242				rate);
243
244	if (clk_set_rate(clk, rate_rounded))
245		netdev_err(dev, "adjusting tx_clk failed.\n");
246}
247
248static void macb_handle_link_change(struct net_device *dev)
249{
250	struct macb *bp = netdev_priv(dev);
251	struct phy_device *phydev = bp->phy_dev;
252	unsigned long flags;
253
254	int status_change = 0;
255
256	spin_lock_irqsave(&bp->lock, flags);
257
258	if (phydev->link) {
259		if ((bp->speed != phydev->speed) ||
260		    (bp->duplex != phydev->duplex)) {
261			u32 reg;
262
263			reg = macb_readl(bp, NCFGR);
264			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
265			if (macb_is_gem(bp))
266				reg &= ~GEM_BIT(GBE);
267
268			if (phydev->duplex)
269				reg |= MACB_BIT(FD);
270			if (phydev->speed == SPEED_100)
271				reg |= MACB_BIT(SPD);
272			if (phydev->speed == SPEED_1000 &&
273			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
274				reg |= GEM_BIT(GBE);
275
276			macb_or_gem_writel(bp, NCFGR, reg);
277
278			bp->speed = phydev->speed;
279			bp->duplex = phydev->duplex;
280			status_change = 1;
281		}
282	}
283
284	if (phydev->link != bp->link) {
285		if (!phydev->link) {
286			bp->speed = 0;
287			bp->duplex = -1;
288		}
289		bp->link = phydev->link;
290
291		status_change = 1;
292	}
293
294	spin_unlock_irqrestore(&bp->lock, flags);
295
296	if (status_change) {
297		if (phydev->link) {
298			/* Update the TX clock rate if and only if the link is
299			 * up and there has been a link change.
300			 */
301			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
302
303			netif_carrier_on(dev);
304			netdev_info(dev, "link up (%d/%s)\n",
305				    phydev->speed,
306				    phydev->duplex == DUPLEX_FULL ?
307				    "Full" : "Half");
308		} else {
309			netif_carrier_off(dev);
310			netdev_info(dev, "link down\n");
311		}
312	}
313}
314
315/* based on au1000_eth. c*/
316static int macb_mii_probe(struct net_device *dev)
317{
318	struct macb *bp = netdev_priv(dev);
319	struct macb_platform_data *pdata;
320	struct phy_device *phydev;
321	int phy_irq;
322	int ret;
323
324	phydev = phy_find_first(bp->mii_bus);
325	if (!phydev) {
326		netdev_err(dev, "no PHY found\n");
327		return -ENXIO;
328	}
329
330	pdata = dev_get_platdata(&bp->pdev->dev);
331	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
332		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
333		if (!ret) {
334			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
335			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
336		}
337	}
338
339	/* attach the mac to the phy */
340	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
341				 bp->phy_interface);
342	if (ret) {
343		netdev_err(dev, "Could not attach to PHY\n");
344		return ret;
345	}
346
347	/* mask with MAC supported features */
348	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
349		phydev->supported &= PHY_GBIT_FEATURES;
350	else
351		phydev->supported &= PHY_BASIC_FEATURES;
352
353	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
354		phydev->supported &= ~SUPPORTED_1000baseT_Half;
355
356	phydev->advertising = phydev->supported;
357
358	bp->link = 0;
359	bp->speed = 0;
360	bp->duplex = -1;
361	bp->phy_dev = phydev;
362
363	return 0;
364}
365
366static int macb_mii_init(struct macb *bp)
367{
368	struct macb_platform_data *pdata;
369	struct device_node *np;
370	int err = -ENXIO, i;
371
372	/* Enable management port */
373	macb_writel(bp, NCR, MACB_BIT(MPE));
374
375	bp->mii_bus = mdiobus_alloc();
376	if (bp->mii_bus == NULL) {
377		err = -ENOMEM;
378		goto err_out;
379	}
380
381	bp->mii_bus->name = "MACB_mii_bus";
382	bp->mii_bus->read = &macb_mdio_read;
383	bp->mii_bus->write = &macb_mdio_write;
384	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
385		bp->pdev->name, bp->pdev->id);
386	bp->mii_bus->priv = bp;
387	bp->mii_bus->parent = &bp->dev->dev;
388	pdata = dev_get_platdata(&bp->pdev->dev);
389
390	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
391	if (!bp->mii_bus->irq) {
392		err = -ENOMEM;
393		goto err_out_free_mdiobus;
394	}
395
396	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
397
398	np = bp->pdev->dev.of_node;
399	if (np) {
400		/* try dt phy registration */
401		err = of_mdiobus_register(bp->mii_bus, np);
402
403		/* fallback to standard phy registration if no phy were
404		   found during dt phy registration */
405		if (!err && !phy_find_first(bp->mii_bus)) {
406			for (i = 0; i < PHY_MAX_ADDR; i++) {
407				struct phy_device *phydev;
408
409				phydev = mdiobus_scan(bp->mii_bus, i);
410				if (IS_ERR(phydev)) {
411					err = PTR_ERR(phydev);
412					break;
413				}
414			}
415
416			if (err)
417				goto err_out_unregister_bus;
418		}
419	} else {
420		for (i = 0; i < PHY_MAX_ADDR; i++)
421			bp->mii_bus->irq[i] = PHY_POLL;
422
423		if (pdata)
424			bp->mii_bus->phy_mask = pdata->phy_mask;
425
426		err = mdiobus_register(bp->mii_bus);
427	}
428
429	if (err)
430		goto err_out_free_mdio_irq;
431
432	err = macb_mii_probe(bp->dev);
433	if (err)
434		goto err_out_unregister_bus;
435
436	return 0;
437
438err_out_unregister_bus:
439	mdiobus_unregister(bp->mii_bus);
440err_out_free_mdio_irq:
441	kfree(bp->mii_bus->irq);
442err_out_free_mdiobus:
443	mdiobus_free(bp->mii_bus);
444err_out:
445	return err;
446}
447
448static void macb_update_stats(struct macb *bp)
449{
450	u32 __iomem *reg = bp->regs + MACB_PFR;
451	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
452	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
453
454	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
455
456	for(; p < end; p++, reg++)
457		*p += readl_relaxed(reg);
458}
459
460static int macb_halt_tx(struct macb *bp)
461{
462	unsigned long	halt_time, timeout;
463	u32		status;
464
465	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
466
467	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
468	do {
469		halt_time = jiffies;
470		status = macb_readl(bp, TSR);
471		if (!(status & MACB_BIT(TGO)))
472			return 0;
473
474		usleep_range(10, 250);
475	} while (time_before(halt_time, timeout));
476
477	return -ETIMEDOUT;
478}
479
480static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
481{
482	if (tx_skb->mapping) {
483		if (tx_skb->mapped_as_page)
484			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
485				       tx_skb->size, DMA_TO_DEVICE);
486		else
487			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
488					 tx_skb->size, DMA_TO_DEVICE);
489		tx_skb->mapping = 0;
490	}
491
492	if (tx_skb->skb) {
493		dev_kfree_skb_any(tx_skb->skb);
494		tx_skb->skb = NULL;
495	}
496}
497
498static void macb_tx_error_task(struct work_struct *work)
499{
500	struct macb_queue	*queue = container_of(work, struct macb_queue,
501						      tx_error_task);
502	struct macb		*bp = queue->bp;
503	struct macb_tx_skb	*tx_skb;
504	struct macb_dma_desc	*desc;
505	struct sk_buff		*skb;
506	unsigned int		tail;
507	unsigned long		flags;
508
509	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
510		    (unsigned int)(queue - bp->queues),
511		    queue->tx_tail, queue->tx_head);
512
513	/* Prevent the queue IRQ handlers from running: each of them may call
514	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
515	 * As explained below, we have to halt the transmission before updating
516	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
517	 * network engine about the macb/gem being halted.
518	 */
519	spin_lock_irqsave(&bp->lock, flags);
520
521	/* Make sure nobody is trying to queue up new packets */
522	netif_tx_stop_all_queues(bp->dev);
523
524	/*
525	 * Stop transmission now
526	 * (in case we have just queued new packets)
527	 * macb/gem must be halted to write TBQP register
528	 */
529	if (macb_halt_tx(bp))
530		/* Just complain for now, reinitializing TX path can be good */
531		netdev_err(bp->dev, "BUG: halt tx timed out\n");
532
533	/*
534	 * Treat frames in TX queue including the ones that caused the error.
535	 * Free transmit buffers in upper layer.
536	 */
537	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
538		u32	ctrl;
539
540		desc = macb_tx_desc(queue, tail);
541		ctrl = desc->ctrl;
542		tx_skb = macb_tx_skb(queue, tail);
543		skb = tx_skb->skb;
544
545		if (ctrl & MACB_BIT(TX_USED)) {
546			/* skb is set for the last buffer of the frame */
547			while (!skb) {
548				macb_tx_unmap(bp, tx_skb);
549				tail++;
550				tx_skb = macb_tx_skb(queue, tail);
551				skb = tx_skb->skb;
552			}
553
554			/* ctrl still refers to the first buffer descriptor
555			 * since it's the only one written back by the hardware
556			 */
557			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
558				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
559					    macb_tx_ring_wrap(tail), skb->data);
560				bp->stats.tx_packets++;
561				bp->stats.tx_bytes += skb->len;
562			}
563		} else {
564			/*
565			 * "Buffers exhausted mid-frame" errors may only happen
566			 * if the driver is buggy, so complain loudly about those.
567			 * Statistics are updated by hardware.
568			 */
569			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
570				netdev_err(bp->dev,
571					   "BUG: TX buffers exhausted mid-frame\n");
572
573			desc->ctrl = ctrl | MACB_BIT(TX_USED);
574		}
575
576		macb_tx_unmap(bp, tx_skb);
577	}
578
579	/* Set end of TX queue */
580	desc = macb_tx_desc(queue, 0);
581	desc->addr = 0;
582	desc->ctrl = MACB_BIT(TX_USED);
583
584	/* Make descriptor updates visible to hardware */
585	wmb();
586
587	/* Reinitialize the TX desc queue */
588	queue_writel(queue, TBQP, queue->tx_ring_dma);
589	/* Make TX ring reflect state of hardware */
590	queue->tx_head = 0;
591	queue->tx_tail = 0;
592
593	/* Housework before enabling TX IRQ */
594	macb_writel(bp, TSR, macb_readl(bp, TSR));
595	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
596
597	/* Now we are ready to start transmission again */
598	netif_tx_start_all_queues(bp->dev);
599	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
600
601	spin_unlock_irqrestore(&bp->lock, flags);
602}
603
604static void macb_tx_interrupt(struct macb_queue *queue)
605{
606	unsigned int tail;
607	unsigned int head;
608	u32 status;
609	struct macb *bp = queue->bp;
610	u16 queue_index = queue - bp->queues;
611
612	status = macb_readl(bp, TSR);
613	macb_writel(bp, TSR, status);
614
615	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
616		queue_writel(queue, ISR, MACB_BIT(TCOMP));
617
618	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
619		(unsigned long)status);
620
621	head = queue->tx_head;
622	for (tail = queue->tx_tail; tail != head; tail++) {
623		struct macb_tx_skb	*tx_skb;
624		struct sk_buff		*skb;
625		struct macb_dma_desc	*desc;
626		u32			ctrl;
627
628		desc = macb_tx_desc(queue, tail);
629
630		/* Make hw descriptor updates visible to CPU */
631		rmb();
632
633		ctrl = desc->ctrl;
634
635		/* TX_USED bit is only set by hardware on the very first buffer
636		 * descriptor of the transmitted frame.
637		 */
638		if (!(ctrl & MACB_BIT(TX_USED)))
639			break;
640
641		/* Process all buffers of the current transmitted frame */
642		for (;; tail++) {
643			tx_skb = macb_tx_skb(queue, tail);
644			skb = tx_skb->skb;
645
646			/* First, update TX stats if needed */
647			if (skb) {
648				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
649					    macb_tx_ring_wrap(tail), skb->data);
650				bp->stats.tx_packets++;
651				bp->stats.tx_bytes += skb->len;
652			}
653
654			/* Now we can safely release resources */
655			macb_tx_unmap(bp, tx_skb);
656
657			/* skb is set only for the last buffer of the frame.
658			 * WARNING: at this point skb has been freed by
659			 * macb_tx_unmap().
660			 */
661			if (skb)
662				break;
663		}
664	}
665
666	queue->tx_tail = tail;
667	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
668	    CIRC_CNT(queue->tx_head, queue->tx_tail,
669		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
670		netif_wake_subqueue(bp->dev, queue_index);
671}
672
673static void gem_rx_refill(struct macb *bp)
674{
675	unsigned int		entry;
676	struct sk_buff		*skb;
677	dma_addr_t		paddr;
678
679	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
680		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
681
682		/* Make hw descriptor updates visible to CPU */
683		rmb();
684
685		bp->rx_prepared_head++;
686
687		if (bp->rx_skbuff[entry] == NULL) {
688			/* allocate sk_buff for this free entry in ring */
689			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
690			if (unlikely(skb == NULL)) {
691				netdev_err(bp->dev,
692					   "Unable to allocate sk_buff\n");
693				break;
694			}
695
696			/* now fill corresponding descriptor entry */
697			paddr = dma_map_single(&bp->pdev->dev, skb->data,
698					       bp->rx_buffer_size, DMA_FROM_DEVICE);
699			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
700				dev_kfree_skb(skb);
701				break;
702			}
703
704			bp->rx_skbuff[entry] = skb;
705
706			if (entry == RX_RING_SIZE - 1)
707				paddr |= MACB_BIT(RX_WRAP);
708			bp->rx_ring[entry].addr = paddr;
709			bp->rx_ring[entry].ctrl = 0;
710
711			/* properly align Ethernet header */
712			skb_reserve(skb, NET_IP_ALIGN);
713		} else {
714			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
715			bp->rx_ring[entry].ctrl = 0;
716		}
717	}
718
719	/* Make descriptor updates visible to hardware */
720	wmb();
721
722	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
723		   bp->rx_prepared_head, bp->rx_tail);
724}
725
726/* Mark DMA descriptors from begin up to and not including end as unused */
727static void discard_partial_frame(struct macb *bp, unsigned int begin,
728				  unsigned int end)
729{
730	unsigned int frag;
731
732	for (frag = begin; frag != end; frag++) {
733		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
734		desc->addr &= ~MACB_BIT(RX_USED);
735	}
736
737	/* Make descriptor updates visible to hardware */
738	wmb();
739
740	/*
741	 * When this happens, the hardware stats registers for
742	 * whatever caused this is updated, so we don't have to record
743	 * anything.
744	 */
745}
746
747static int gem_rx(struct macb *bp, int budget)
748{
749	unsigned int		len;
750	unsigned int		entry;
751	struct sk_buff		*skb;
752	struct macb_dma_desc	*desc;
753	int			count = 0;
754
755	while (count < budget) {
756		u32 addr, ctrl;
757
758		entry = macb_rx_ring_wrap(bp->rx_tail);
759		desc = &bp->rx_ring[entry];
760
761		/* Make hw descriptor updates visible to CPU */
762		rmb();
763
764		addr = desc->addr;
765		ctrl = desc->ctrl;
766
767		if (!(addr & MACB_BIT(RX_USED)))
768			break;
769
770		bp->rx_tail++;
771		count++;
772
773		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
774			netdev_err(bp->dev,
775				   "not whole frame pointed by descriptor\n");
776			bp->stats.rx_dropped++;
777			break;
778		}
779		skb = bp->rx_skbuff[entry];
780		if (unlikely(!skb)) {
781			netdev_err(bp->dev,
782				   "inconsistent Rx descriptor chain\n");
783			bp->stats.rx_dropped++;
784			break;
785		}
786		/* now everything is ready for receiving packet */
787		bp->rx_skbuff[entry] = NULL;
788		len = MACB_BFEXT(RX_FRMLEN, ctrl);
789
790		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
791
792		skb_put(skb, len);
793		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
794		dma_unmap_single(&bp->pdev->dev, addr,
795				 bp->rx_buffer_size, DMA_FROM_DEVICE);
796
797		skb->protocol = eth_type_trans(skb, bp->dev);
798		skb_checksum_none_assert(skb);
799		if (bp->dev->features & NETIF_F_RXCSUM &&
800		    !(bp->dev->flags & IFF_PROMISC) &&
801		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
802			skb->ip_summed = CHECKSUM_UNNECESSARY;
803
804		bp->stats.rx_packets++;
805		bp->stats.rx_bytes += skb->len;
806
807#if defined(DEBUG) && defined(VERBOSE_DEBUG)
808		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
809			    skb->len, skb->csum);
810		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
811			       skb_mac_header(skb), 16, true);
812		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
813			       skb->data, 32, true);
814#endif
815
816		netif_receive_skb(skb);
817	}
818
819	gem_rx_refill(bp);
820
821	return count;
822}
823
824static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
825			 unsigned int last_frag)
826{
827	unsigned int len;
828	unsigned int frag;
829	unsigned int offset;
830	struct sk_buff *skb;
831	struct macb_dma_desc *desc;
832
833	desc = macb_rx_desc(bp, last_frag);
834	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
835
836	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
837		macb_rx_ring_wrap(first_frag),
838		macb_rx_ring_wrap(last_frag), len);
839
840	/*
841	 * The ethernet header starts NET_IP_ALIGN bytes into the
842	 * first buffer. Since the header is 14 bytes, this makes the
843	 * payload word-aligned.
844	 *
845	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
846	 * the two padding bytes into the skb so that we avoid hitting
847	 * the slowpath in memcpy(), and pull them off afterwards.
848	 */
849	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
850	if (!skb) {
851		bp->stats.rx_dropped++;
852		for (frag = first_frag; ; frag++) {
853			desc = macb_rx_desc(bp, frag);
854			desc->addr &= ~MACB_BIT(RX_USED);
855			if (frag == last_frag)
856				break;
857		}
858
859		/* Make descriptor updates visible to hardware */
860		wmb();
861
862		return 1;
863	}
864
865	offset = 0;
866	len += NET_IP_ALIGN;
867	skb_checksum_none_assert(skb);
868	skb_put(skb, len);
869
870	for (frag = first_frag; ; frag++) {
871		unsigned int frag_len = bp->rx_buffer_size;
872
873		if (offset + frag_len > len) {
874			BUG_ON(frag != last_frag);
875			frag_len = len - offset;
876		}
877		skb_copy_to_linear_data_offset(skb, offset,
878				macb_rx_buffer(bp, frag), frag_len);
879		offset += bp->rx_buffer_size;
880		desc = macb_rx_desc(bp, frag);
881		desc->addr &= ~MACB_BIT(RX_USED);
882
883		if (frag == last_frag)
884			break;
885	}
886
887	/* Make descriptor updates visible to hardware */
888	wmb();
889
890	__skb_pull(skb, NET_IP_ALIGN);
891	skb->protocol = eth_type_trans(skb, bp->dev);
892
893	bp->stats.rx_packets++;
894	bp->stats.rx_bytes += skb->len;
895	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
896		   skb->len, skb->csum);
897	netif_receive_skb(skb);
898
899	return 0;
900}
901
902static int macb_rx(struct macb *bp, int budget)
903{
904	int received = 0;
905	unsigned int tail;
906	int first_frag = -1;
907
908	for (tail = bp->rx_tail; budget > 0; tail++) {
909		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
910		u32 addr, ctrl;
911
912		/* Make hw descriptor updates visible to CPU */
913		rmb();
914
915		addr = desc->addr;
916		ctrl = desc->ctrl;
917
918		if (!(addr & MACB_BIT(RX_USED)))
919			break;
920
921		if (ctrl & MACB_BIT(RX_SOF)) {
922			if (first_frag != -1)
923				discard_partial_frame(bp, first_frag, tail);
924			first_frag = tail;
925		}
926
927		if (ctrl & MACB_BIT(RX_EOF)) {
928			int dropped;
929			BUG_ON(first_frag == -1);
930
931			dropped = macb_rx_frame(bp, first_frag, tail);
932			first_frag = -1;
933			if (!dropped) {
934				received++;
935				budget--;
936			}
937		}
938	}
939
940	if (first_frag != -1)
941		bp->rx_tail = first_frag;
942	else
943		bp->rx_tail = tail;
944
945	return received;
946}
947
948static int macb_poll(struct napi_struct *napi, int budget)
949{
950	struct macb *bp = container_of(napi, struct macb, napi);
951	int work_done;
952	u32 status;
953
954	status = macb_readl(bp, RSR);
955	macb_writel(bp, RSR, status);
956
957	work_done = 0;
958
959	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
960		   (unsigned long)status, budget);
961
962	work_done = bp->macbgem_ops.mog_rx(bp, budget);
963	if (work_done < budget) {
964		napi_complete(napi);
965
966		/* Packets received while interrupts were disabled */
967		status = macb_readl(bp, RSR);
968		if (status) {
969			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
970				macb_writel(bp, ISR, MACB_BIT(RCOMP));
971			napi_reschedule(napi);
972		} else {
973			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
974		}
975	}
976
977	/* TODO: Handle errors */
978
979	return work_done;
980}
981
982static irqreturn_t macb_interrupt(int irq, void *dev_id)
983{
984	struct macb_queue *queue = dev_id;
985	struct macb *bp = queue->bp;
986	struct net_device *dev = bp->dev;
987	u32 status, ctrl;
988
989	status = queue_readl(queue, ISR);
990
991	if (unlikely(!status))
992		return IRQ_NONE;
993
994	spin_lock(&bp->lock);
995
996	while (status) {
997		/* close possible race with dev_close */
998		if (unlikely(!netif_running(dev))) {
999			queue_writel(queue, IDR, -1);
1000			break;
1001		}
1002
1003		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1004			    (unsigned int)(queue - bp->queues),
1005			    (unsigned long)status);
1006
1007		if (status & MACB_RX_INT_FLAGS) {
1008			/*
1009			 * There's no point taking any more interrupts
1010			 * until we have processed the buffers. The
1011			 * scheduling call may fail if the poll routine
1012			 * is already scheduled, so disable interrupts
1013			 * now.
1014			 */
1015			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1016			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1017				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1018
1019			if (napi_schedule_prep(&bp->napi)) {
1020				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1021				__napi_schedule(&bp->napi);
1022			}
1023		}
1024
1025		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1026			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1027			schedule_work(&queue->tx_error_task);
1028
1029			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1030				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1031
1032			break;
1033		}
1034
1035		if (status & MACB_BIT(TCOMP))
1036			macb_tx_interrupt(queue);
1037
1038		/*
1039		 * Link change detection isn't possible with RMII, so we'll
1040		 * add that if/when we get our hands on a full-blown MII PHY.
1041		 */
1042
1043		/* There is a hardware issue under heavy load where DMA can
1044		 * stop, this causes endless "used buffer descriptor read"
1045		 * interrupts but it can be cleared by re-enabling RX. See
1046		 * the at91 manual, section 41.3.1 or the Zynq manual
1047		 * section 16.7.4 for details.
1048		 */
1049		if (status & MACB_BIT(RXUBR)) {
1050			ctrl = macb_readl(bp, NCR);
1051			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1052			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1053
1054			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1055				macb_writel(bp, ISR, MACB_BIT(RXUBR));
1056		}
1057
1058		if (status & MACB_BIT(ISR_ROVR)) {
1059			/* We missed at least one packet */
1060			if (macb_is_gem(bp))
1061				bp->hw_stats.gem.rx_overruns++;
1062			else
1063				bp->hw_stats.macb.rx_overruns++;
1064
1065			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1066				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1067		}
1068
1069		if (status & MACB_BIT(HRESP)) {
1070			/*
1071			 * TODO: Reset the hardware, and maybe move the
1072			 * netdev_err to a lower-priority context as well
1073			 * (work queue?)
1074			 */
1075			netdev_err(dev, "DMA bus error: HRESP not OK\n");
1076
1077			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1078				queue_writel(queue, ISR, MACB_BIT(HRESP));
1079		}
1080
1081		status = queue_readl(queue, ISR);
1082	}
1083
1084	spin_unlock(&bp->lock);
1085
1086	return IRQ_HANDLED;
1087}
1088
1089#ifdef CONFIG_NET_POLL_CONTROLLER
1090/*
1091 * Polling receive - used by netconsole and other diagnostic tools
1092 * to allow network i/o with interrupts disabled.
1093 */
1094static void macb_poll_controller(struct net_device *dev)
1095{
1096	struct macb *bp = netdev_priv(dev);
1097	struct macb_queue *queue;
1098	unsigned long flags;
1099	unsigned int q;
1100
1101	local_irq_save(flags);
1102	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1103		macb_interrupt(dev->irq, queue);
1104	local_irq_restore(flags);
1105}
1106#endif
1107
1108static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1109						     unsigned int len)
1110{
1111	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1112}
1113
1114static unsigned int macb_tx_map(struct macb *bp,
1115				struct macb_queue *queue,
1116				struct sk_buff *skb)
1117{
1118	dma_addr_t mapping;
1119	unsigned int len, entry, i, tx_head = queue->tx_head;
1120	struct macb_tx_skb *tx_skb = NULL;
1121	struct macb_dma_desc *desc;
1122	unsigned int offset, size, count = 0;
1123	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1124	unsigned int eof = 1;
1125	u32 ctrl;
1126
1127	/* First, map non-paged data */
1128	len = skb_headlen(skb);
1129	offset = 0;
1130	while (len) {
1131		size = min(len, bp->max_tx_length);
1132		entry = macb_tx_ring_wrap(tx_head);
1133		tx_skb = &queue->tx_skb[entry];
1134
1135		mapping = dma_map_single(&bp->pdev->dev,
1136					 skb->data + offset,
1137					 size, DMA_TO_DEVICE);
1138		if (dma_mapping_error(&bp->pdev->dev, mapping))
1139			goto dma_error;
1140
1141		/* Save info to properly release resources */
1142		tx_skb->skb = NULL;
1143		tx_skb->mapping = mapping;
1144		tx_skb->size = size;
1145		tx_skb->mapped_as_page = false;
1146
1147		len -= size;
1148		offset += size;
1149		count++;
1150		tx_head++;
1151	}
1152
1153	/* Then, map paged data from fragments */
1154	for (f = 0; f < nr_frags; f++) {
1155		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1156
1157		len = skb_frag_size(frag);
1158		offset = 0;
1159		while (len) {
1160			size = min(len, bp->max_tx_length);
1161			entry = macb_tx_ring_wrap(tx_head);
1162			tx_skb = &queue->tx_skb[entry];
1163
1164			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1165						   offset, size, DMA_TO_DEVICE);
1166			if (dma_mapping_error(&bp->pdev->dev, mapping))
1167				goto dma_error;
1168
1169			/* Save info to properly release resources */
1170			tx_skb->skb = NULL;
1171			tx_skb->mapping = mapping;
1172			tx_skb->size = size;
1173			tx_skb->mapped_as_page = true;
1174
1175			len -= size;
1176			offset += size;
1177			count++;
1178			tx_head++;
1179		}
1180	}
1181
1182	/* Should never happen */
1183	if (unlikely(tx_skb == NULL)) {
1184		netdev_err(bp->dev, "BUG! empty skb!\n");
1185		return 0;
1186	}
1187
1188	/* This is the last buffer of the frame: save socket buffer */
1189	tx_skb->skb = skb;
1190
1191	/* Update TX ring: update buffer descriptors in reverse order
1192	 * to avoid race condition
1193	 */
1194
1195	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
1196	 * to set the end of TX queue
1197	 */
1198	i = tx_head;
1199	entry = macb_tx_ring_wrap(i);
1200	ctrl = MACB_BIT(TX_USED);
1201	desc = &queue->tx_ring[entry];
1202	desc->ctrl = ctrl;
1203
1204	do {
1205		i--;
1206		entry = macb_tx_ring_wrap(i);
1207		tx_skb = &queue->tx_skb[entry];
1208		desc = &queue->tx_ring[entry];
1209
1210		ctrl = (u32)tx_skb->size;
1211		if (eof) {
1212			ctrl |= MACB_BIT(TX_LAST);
1213			eof = 0;
1214		}
1215		if (unlikely(entry == (TX_RING_SIZE - 1)))
1216			ctrl |= MACB_BIT(TX_WRAP);
1217
1218		/* Set TX buffer descriptor */
1219		desc->addr = tx_skb->mapping;
1220		/* desc->addr must be visible to hardware before clearing
1221		 * 'TX_USED' bit in desc->ctrl.
1222		 */
1223		wmb();
1224		desc->ctrl = ctrl;
1225	} while (i != queue->tx_head);
1226
1227	queue->tx_head = tx_head;
1228
1229	return count;
1230
1231dma_error:
1232	netdev_err(bp->dev, "TX DMA map failed\n");
1233
1234	for (i = queue->tx_head; i != tx_head; i++) {
1235		tx_skb = macb_tx_skb(queue, i);
1236
1237		macb_tx_unmap(bp, tx_skb);
1238	}
1239
1240	return 0;
1241}
1242
1243static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1244{
1245	u16 queue_index = skb_get_queue_mapping(skb);
1246	struct macb *bp = netdev_priv(dev);
1247	struct macb_queue *queue = &bp->queues[queue_index];
1248	unsigned long flags;
1249	unsigned int count, nr_frags, frag_size, f;
1250
1251#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1252	netdev_vdbg(bp->dev,
1253		   "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1254		   queue_index, skb->len, skb->head, skb->data,
1255		   skb_tail_pointer(skb), skb_end_pointer(skb));
1256	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1257		       skb->data, 16, true);
1258#endif
1259
1260	/* Count how many TX buffer descriptors are needed to send this
1261	 * socket buffer: skb fragments of jumbo frames may need to be
1262	 * splitted into many buffer descriptors.
1263	 */
1264	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
1265	nr_frags = skb_shinfo(skb)->nr_frags;
1266	for (f = 0; f < nr_frags; f++) {
1267		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1268		count += macb_count_tx_descriptors(bp, frag_size);
1269	}
1270
1271	spin_lock_irqsave(&bp->lock, flags);
1272
1273	/* This is a hard error, log it. */
1274	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1275		netif_stop_subqueue(dev, queue_index);
1276		spin_unlock_irqrestore(&bp->lock, flags);
1277		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1278			   queue->tx_head, queue->tx_tail);
1279		return NETDEV_TX_BUSY;
1280	}
1281
1282	/* Map socket buffer for DMA transfer */
1283	if (!macb_tx_map(bp, queue, skb)) {
1284		dev_kfree_skb_any(skb);
1285		goto unlock;
1286	}
1287
1288	/* Make newly initialized descriptor visible to hardware */
1289	wmb();
1290
1291	skb_tx_timestamp(skb);
1292
1293	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1294
1295	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1296		netif_stop_subqueue(dev, queue_index);
1297
1298unlock:
1299	spin_unlock_irqrestore(&bp->lock, flags);
1300
1301	return NETDEV_TX_OK;
1302}
1303
1304static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1305{
1306	if (!macb_is_gem(bp)) {
1307		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1308	} else {
1309		bp->rx_buffer_size = size;
1310
1311		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1312			netdev_dbg(bp->dev,
1313				    "RX buffer must be multiple of %d bytes, expanding\n",
1314				    RX_BUFFER_MULTIPLE);
1315			bp->rx_buffer_size =
1316				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1317		}
1318	}
1319
1320	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
1321		   bp->dev->mtu, bp->rx_buffer_size);
1322}
1323
1324static void gem_free_rx_buffers(struct macb *bp)
1325{
1326	struct sk_buff		*skb;
1327	struct macb_dma_desc	*desc;
1328	dma_addr_t		addr;
1329	int i;
1330
1331	if (!bp->rx_skbuff)
1332		return;
1333
1334	for (i = 0; i < RX_RING_SIZE; i++) {
1335		skb = bp->rx_skbuff[i];
1336
1337		if (skb == NULL)
1338			continue;
1339
1340		desc = &bp->rx_ring[i];
1341		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1342		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1343				 DMA_FROM_DEVICE);
1344		dev_kfree_skb_any(skb);
1345		skb = NULL;
1346	}
1347
1348	kfree(bp->rx_skbuff);
1349	bp->rx_skbuff = NULL;
1350}
1351
1352static void macb_free_rx_buffers(struct macb *bp)
1353{
1354	if (bp->rx_buffers) {
1355		dma_free_coherent(&bp->pdev->dev,
1356				  RX_RING_SIZE * bp->rx_buffer_size,
1357				  bp->rx_buffers, bp->rx_buffers_dma);
1358		bp->rx_buffers = NULL;
1359	}
1360}
1361
1362static void macb_free_consistent(struct macb *bp)
1363{
1364	struct macb_queue *queue;
1365	unsigned int q;
1366
1367	bp->macbgem_ops.mog_free_rx_buffers(bp);
1368	if (bp->rx_ring) {
1369		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1370				  bp->rx_ring, bp->rx_ring_dma);
1371		bp->rx_ring = NULL;
1372	}
1373
1374	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1375		kfree(queue->tx_skb);
1376		queue->tx_skb = NULL;
1377		if (queue->tx_ring) {
1378			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1379					  queue->tx_ring, queue->tx_ring_dma);
1380			queue->tx_ring = NULL;
1381		}
1382	}
1383}
1384
1385static int gem_alloc_rx_buffers(struct macb *bp)
1386{
1387	int size;
1388
1389	size = RX_RING_SIZE * sizeof(struct sk_buff *);
1390	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1391	if (!bp->rx_skbuff)
1392		return -ENOMEM;
1393	else
1394		netdev_dbg(bp->dev,
1395			   "Allocated %d RX struct sk_buff entries at %p\n",
1396			   RX_RING_SIZE, bp->rx_skbuff);
1397	return 0;
1398}
1399
1400static int macb_alloc_rx_buffers(struct macb *bp)
1401{
1402	int size;
1403
1404	size = RX_RING_SIZE * bp->rx_buffer_size;
1405	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1406					    &bp->rx_buffers_dma, GFP_KERNEL);
1407	if (!bp->rx_buffers)
1408		return -ENOMEM;
1409	else
1410		netdev_dbg(bp->dev,
1411			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1412			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1413	return 0;
1414}
1415
1416static int macb_alloc_consistent(struct macb *bp)
1417{
1418	struct macb_queue *queue;
1419	unsigned int q;
1420	int size;
1421
1422	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1423		size = TX_RING_BYTES;
1424		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1425						    &queue->tx_ring_dma,
1426						    GFP_KERNEL);
1427		if (!queue->tx_ring)
1428			goto out_err;
1429		netdev_dbg(bp->dev,
1430			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1431			   q, size, (unsigned long)queue->tx_ring_dma,
1432			   queue->tx_ring);
1433
1434		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1435		queue->tx_skb = kmalloc(size, GFP_KERNEL);
1436		if (!queue->tx_skb)
1437			goto out_err;
1438	}
1439
1440	size = RX_RING_BYTES;
1441	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1442					 &bp->rx_ring_dma, GFP_KERNEL);
1443	if (!bp->rx_ring)
1444		goto out_err;
1445	netdev_dbg(bp->dev,
1446		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1447		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1448
1449	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1450		goto out_err;
1451
1452	return 0;
1453
1454out_err:
1455	macb_free_consistent(bp);
1456	return -ENOMEM;
1457}
1458
1459static void gem_init_rings(struct macb *bp)
1460{
1461	struct macb_queue *queue;
1462	unsigned int q;
1463	int i;
1464
1465	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1466		for (i = 0; i < TX_RING_SIZE; i++) {
1467			queue->tx_ring[i].addr = 0;
1468			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1469		}
1470		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1471		queue->tx_head = 0;
1472		queue->tx_tail = 0;
1473	}
1474
1475	bp->rx_tail = 0;
1476	bp->rx_prepared_head = 0;
1477
1478	gem_rx_refill(bp);
1479}
1480
1481static void macb_init_rings(struct macb *bp)
1482{
1483	int i;
1484	dma_addr_t addr;
1485
1486	addr = bp->rx_buffers_dma;
1487	for (i = 0; i < RX_RING_SIZE; i++) {
1488		bp->rx_ring[i].addr = addr;
1489		bp->rx_ring[i].ctrl = 0;
1490		addr += bp->rx_buffer_size;
1491	}
1492	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1493
1494	for (i = 0; i < TX_RING_SIZE; i++) {
1495		bp->queues[0].tx_ring[i].addr = 0;
1496		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
1497	}
1498	bp->queues[0].tx_head = 0;
1499	bp->queues[0].tx_tail = 0;
1500	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1501
1502	bp->rx_tail = 0;
1503}
1504
1505static void macb_reset_hw(struct macb *bp)
1506{
1507	struct macb_queue *queue;
1508	unsigned int q;
1509
1510	/*
1511	 * Disable RX and TX (XXX: Should we halt the transmission
1512	 * more gracefully?)
1513	 */
1514	macb_writel(bp, NCR, 0);
1515
1516	/* Clear the stats registers (XXX: Update stats first?) */
1517	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1518
1519	/* Clear all status flags */
1520	macb_writel(bp, TSR, -1);
1521	macb_writel(bp, RSR, -1);
1522
1523	/* Disable all interrupts */
1524	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1525		queue_writel(queue, IDR, -1);
1526		queue_readl(queue, ISR);
1527	}
1528}
1529
1530static u32 gem_mdc_clk_div(struct macb *bp)
1531{
1532	u32 config;
1533	unsigned long pclk_hz = clk_get_rate(bp->pclk);
1534
1535	if (pclk_hz <= 20000000)
1536		config = GEM_BF(CLK, GEM_CLK_DIV8);
1537	else if (pclk_hz <= 40000000)
1538		config = GEM_BF(CLK, GEM_CLK_DIV16);
1539	else if (pclk_hz <= 80000000)
1540		config = GEM_BF(CLK, GEM_CLK_DIV32);
1541	else if (pclk_hz <= 120000000)
1542		config = GEM_BF(CLK, GEM_CLK_DIV48);
1543	else if (pclk_hz <= 160000000)
1544		config = GEM_BF(CLK, GEM_CLK_DIV64);
1545	else
1546		config = GEM_BF(CLK, GEM_CLK_DIV96);
1547
1548	return config;
1549}
1550
1551static u32 macb_mdc_clk_div(struct macb *bp)
1552{
1553	u32 config;
1554	unsigned long pclk_hz;
1555
1556	if (macb_is_gem(bp))
1557		return gem_mdc_clk_div(bp);
1558
1559	pclk_hz = clk_get_rate(bp->pclk);
1560	if (pclk_hz <= 20000000)
1561		config = MACB_BF(CLK, MACB_CLK_DIV8);
1562	else if (pclk_hz <= 40000000)
1563		config = MACB_BF(CLK, MACB_CLK_DIV16);
1564	else if (pclk_hz <= 80000000)
1565		config = MACB_BF(CLK, MACB_CLK_DIV32);
1566	else
1567		config = MACB_BF(CLK, MACB_CLK_DIV64);
1568
1569	return config;
1570}
1571
1572/*
1573 * Get the DMA bus width field of the network configuration register that we
1574 * should program.  We find the width from decoding the design configuration
1575 * register to find the maximum supported data bus width.
1576 */
1577static u32 macb_dbw(struct macb *bp)
1578{
1579	if (!macb_is_gem(bp))
1580		return 0;
1581
1582	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1583	case 4:
1584		return GEM_BF(DBW, GEM_DBW128);
1585	case 2:
1586		return GEM_BF(DBW, GEM_DBW64);
1587	case 1:
1588	default:
1589		return GEM_BF(DBW, GEM_DBW32);
1590	}
1591}
1592
1593/*
1594 * Configure the receive DMA engine
1595 * - use the correct receive buffer size
1596 * - set best burst length for DMA operations
1597 *   (if not supported by FIFO, it will fallback to default)
1598 * - set both rx/tx packet buffers to full memory size
1599 * These are configurable parameters for GEM.
1600 */
1601static void macb_configure_dma(struct macb *bp)
1602{
1603	u32 dmacfg;
1604	u32 tmp, ncr;
1605
1606	if (macb_is_gem(bp)) {
1607		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1608		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1609		if (bp->dma_burst_length)
1610			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1611		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1612		dmacfg &= ~GEM_BIT(ENDIA_PKT);
1613
1614		/* Find the CPU endianness by using the loopback bit of net_ctrl
1615		 * register. save it first. When the CPU is in big endian we
1616		 * need to program swaped mode for management descriptor access.
1617		 */
1618		ncr = macb_readl(bp, NCR);
1619		__raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1620		tmp =  __raw_readl(bp->regs + MACB_NCR);
1621
1622		if (tmp == MACB_BIT(LLB))
1623			dmacfg &= ~GEM_BIT(ENDIA_DESC);
1624		else
1625			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1626
1627		/* Restore net_ctrl */
1628		macb_writel(bp, NCR, ncr);
1629
1630		if (bp->dev->features & NETIF_F_HW_CSUM)
1631			dmacfg |= GEM_BIT(TXCOEN);
1632		else
1633			dmacfg &= ~GEM_BIT(TXCOEN);
1634		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1635			   dmacfg);
1636		gem_writel(bp, DMACFG, dmacfg);
1637	}
1638}
1639
1640static void macb_init_hw(struct macb *bp)
1641{
1642	struct macb_queue *queue;
1643	unsigned int q;
1644
1645	u32 config;
1646
1647	macb_reset_hw(bp);
1648	macb_set_hwaddr(bp);
1649
1650	config = macb_mdc_clk_div(bp);
1651	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
1652	config |= MACB_BIT(PAE);		/* PAuse Enable */
1653	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
1654	config |= MACB_BIT(BIG);		/* Receive oversized frames */
1655	if (bp->dev->flags & IFF_PROMISC)
1656		config |= MACB_BIT(CAF);	/* Copy All Frames */
1657	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1658		config |= GEM_BIT(RXCOEN);
1659	if (!(bp->dev->flags & IFF_BROADCAST))
1660		config |= MACB_BIT(NBC);	/* No BroadCast */
1661	config |= macb_dbw(bp);
1662	macb_writel(bp, NCFGR, config);
1663	bp->speed = SPEED_10;
1664	bp->duplex = DUPLEX_HALF;
1665
1666	macb_configure_dma(bp);
1667
1668	/* Initialize TX and RX buffers */
1669	macb_writel(bp, RBQP, bp->rx_ring_dma);
1670	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1671		queue_writel(queue, TBQP, queue->tx_ring_dma);
1672
1673		/* Enable interrupts */
1674		queue_writel(queue, IER,
1675			     MACB_RX_INT_FLAGS |
1676			     MACB_TX_INT_FLAGS |
1677			     MACB_BIT(HRESP));
1678	}
1679
1680	/* Enable TX and RX */
1681	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1682}
1683
1684/*
1685 * The hash address register is 64 bits long and takes up two
1686 * locations in the memory map.  The least significant bits are stored
1687 * in EMAC_HSL and the most significant bits in EMAC_HSH.
1688 *
1689 * The unicast hash enable and the multicast hash enable bits in the
1690 * network configuration register enable the reception of hash matched
1691 * frames. The destination address is reduced to a 6 bit index into
1692 * the 64 bit hash register using the following hash function.  The
1693 * hash function is an exclusive or of every sixth bit of the
1694 * destination address.
1695 *
1696 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1697 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1698 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1699 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1700 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1701 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1702 *
1703 * da[0] represents the least significant bit of the first byte
1704 * received, that is, the multicast/unicast indicator, and da[47]
1705 * represents the most significant bit of the last byte received.  If
1706 * the hash index, hi[n], points to a bit that is set in the hash
1707 * register then the frame will be matched according to whether the
1708 * frame is multicast or unicast.  A multicast match will be signalled
1709 * if the multicast hash enable bit is set, da[0] is 1 and the hash
1710 * index points to a bit set in the hash register.  A unicast match
1711 * will be signalled if the unicast hash enable bit is set, da[0] is 0
1712 * and the hash index points to a bit set in the hash register.  To
1713 * receive all multicast frames, the hash register should be set with
1714 * all ones and the multicast hash enable bit should be set in the
1715 * network configuration register.
1716 */
1717
1718static inline int hash_bit_value(int bitnr, __u8 *addr)
1719{
1720	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1721		return 1;
1722	return 0;
1723}
1724
1725/*
1726 * Return the hash index value for the specified address.
1727 */
1728static int hash_get_index(__u8 *addr)
1729{
1730	int i, j, bitval;
1731	int hash_index = 0;
1732
1733	for (j = 0; j < 6; j++) {
1734		for (i = 0, bitval = 0; i < 8; i++)
1735			bitval ^= hash_bit_value(i * 6 + j, addr);
1736
1737		hash_index |= (bitval << j);
1738	}
1739
1740	return hash_index;
1741}
1742
1743/*
1744 * Add multicast addresses to the internal multicast-hash table.
1745 */
1746static void macb_sethashtable(struct net_device *dev)
1747{
1748	struct netdev_hw_addr *ha;
1749	unsigned long mc_filter[2];
1750	unsigned int bitnr;
1751	struct macb *bp = netdev_priv(dev);
1752
1753	mc_filter[0] = mc_filter[1] = 0;
1754
1755	netdev_for_each_mc_addr(ha, dev) {
1756		bitnr = hash_get_index(ha->addr);
1757		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1758	}
1759
1760	macb_or_gem_writel(bp, HRB, mc_filter[0]);
1761	macb_or_gem_writel(bp, HRT, mc_filter[1]);
1762}
1763
1764/*
1765 * Enable/Disable promiscuous and multicast modes.
1766 */
1767static void macb_set_rx_mode(struct net_device *dev)
1768{
1769	unsigned long cfg;
1770	struct macb *bp = netdev_priv(dev);
1771
1772	cfg = macb_readl(bp, NCFGR);
1773
1774	if (dev->flags & IFF_PROMISC) {
1775		/* Enable promiscuous mode */
1776		cfg |= MACB_BIT(CAF);
1777
1778		/* Disable RX checksum offload */
1779		if (macb_is_gem(bp))
1780			cfg &= ~GEM_BIT(RXCOEN);
1781	} else {
1782		/* Disable promiscuous mode */
1783		cfg &= ~MACB_BIT(CAF);
1784
1785		/* Enable RX checksum offload only if requested */
1786		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
1787			cfg |= GEM_BIT(RXCOEN);
1788	}
1789
1790	if (dev->flags & IFF_ALLMULTI) {
1791		/* Enable all multicast mode */
1792		macb_or_gem_writel(bp, HRB, -1);
1793		macb_or_gem_writel(bp, HRT, -1);
1794		cfg |= MACB_BIT(NCFGR_MTI);
1795	} else if (!netdev_mc_empty(dev)) {
1796		/* Enable specific multicasts */
1797		macb_sethashtable(dev);
1798		cfg |= MACB_BIT(NCFGR_MTI);
1799	} else if (dev->flags & (~IFF_ALLMULTI)) {
1800		/* Disable all multicast mode */
1801		macb_or_gem_writel(bp, HRB, 0);
1802		macb_or_gem_writel(bp, HRT, 0);
1803		cfg &= ~MACB_BIT(NCFGR_MTI);
1804	}
1805
1806	macb_writel(bp, NCFGR, cfg);
1807}
1808
1809static int macb_open(struct net_device *dev)
1810{
1811	struct macb *bp = netdev_priv(dev);
1812	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
1813	int err;
1814
1815	netdev_dbg(bp->dev, "open\n");
1816
1817	/* carrier starts down */
1818	netif_carrier_off(dev);
1819
1820	/* if the phy is not yet register, retry later*/
1821	if (!bp->phy_dev)
1822		return -EAGAIN;
1823
1824	/* RX buffers initialization */
1825	macb_init_rx_buffer_size(bp, bufsz);
1826
1827	err = macb_alloc_consistent(bp);
1828	if (err) {
1829		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
1830			   err);
1831		return err;
1832	}
1833
1834	napi_enable(&bp->napi);
1835
1836	bp->macbgem_ops.mog_init_rings(bp);
1837	macb_init_hw(bp);
1838
1839	/* schedule a link state check */
1840	phy_start(bp->phy_dev);
1841
1842	netif_tx_start_all_queues(dev);
1843
1844	return 0;
1845}
1846
1847static int macb_close(struct net_device *dev)
1848{
1849	struct macb *bp = netdev_priv(dev);
1850	unsigned long flags;
1851
1852	netif_tx_stop_all_queues(dev);
1853	napi_disable(&bp->napi);
1854
1855	if (bp->phy_dev)
1856		phy_stop(bp->phy_dev);
1857
1858	spin_lock_irqsave(&bp->lock, flags);
1859	macb_reset_hw(bp);
1860	netif_carrier_off(dev);
1861	spin_unlock_irqrestore(&bp->lock, flags);
1862
1863	macb_free_consistent(bp);
1864
1865	return 0;
1866}
1867
1868static void gem_update_stats(struct macb *bp)
1869{
1870	int i;
1871	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1872
1873	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1874		u32 offset = gem_statistics[i].offset;
1875		u64 val = readl_relaxed(bp->regs + offset);
1876
1877		bp->ethtool_stats[i] += val;
1878		*p += val;
1879
1880		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1881			/* Add GEM_OCTTXH, GEM_OCTRXH */
1882			val = readl_relaxed(bp->regs + offset + 4);
1883			bp->ethtool_stats[i] += ((u64)val) << 32;
1884			*(++p) += val;
1885		}
1886	}
1887}
1888
1889static struct net_device_stats *gem_get_stats(struct macb *bp)
1890{
1891	struct gem_stats *hwstat = &bp->hw_stats.gem;
1892	struct net_device_stats *nstat = &bp->stats;
1893
1894	gem_update_stats(bp);
1895
1896	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1897			    hwstat->rx_alignment_errors +
1898			    hwstat->rx_resource_errors +
1899			    hwstat->rx_overruns +
1900			    hwstat->rx_oversize_frames +
1901			    hwstat->rx_jabbers +
1902			    hwstat->rx_undersized_frames +
1903			    hwstat->rx_length_field_frame_errors);
1904	nstat->tx_errors = (hwstat->tx_late_collisions +
1905			    hwstat->tx_excessive_collisions +
1906			    hwstat->tx_underrun +
1907			    hwstat->tx_carrier_sense_errors);
1908	nstat->multicast = hwstat->rx_multicast_frames;
1909	nstat->collisions = (hwstat->tx_single_collision_frames +
1910			     hwstat->tx_multiple_collision_frames +
1911			     hwstat->tx_excessive_collisions);
1912	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
1913				   hwstat->rx_jabbers +
1914				   hwstat->rx_undersized_frames +
1915				   hwstat->rx_length_field_frame_errors);
1916	nstat->rx_over_errors = hwstat->rx_resource_errors;
1917	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
1918	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
1919	nstat->rx_fifo_errors = hwstat->rx_overruns;
1920	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
1921	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
1922	nstat->tx_fifo_errors = hwstat->tx_underrun;
1923
1924	return nstat;
1925}
1926
1927static void gem_get_ethtool_stats(struct net_device *dev,
1928				  struct ethtool_stats *stats, u64 *data)
1929{
1930	struct macb *bp;
1931
1932	bp = netdev_priv(dev);
1933	gem_update_stats(bp);
1934	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
1935}
1936
1937static int gem_get_sset_count(struct net_device *dev, int sset)
1938{
1939	switch (sset) {
1940	case ETH_SS_STATS:
1941		return GEM_STATS_LEN;
1942	default:
1943		return -EOPNOTSUPP;
1944	}
1945}
1946
1947static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1948{
1949	int i;
1950
1951	switch (sset) {
1952	case ETH_SS_STATS:
1953		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
1954			memcpy(p, gem_statistics[i].stat_string,
1955			       ETH_GSTRING_LEN);
1956		break;
1957	}
1958}
1959
1960static struct net_device_stats *macb_get_stats(struct net_device *dev)
1961{
1962	struct macb *bp = netdev_priv(dev);
1963	struct net_device_stats *nstat = &bp->stats;
1964	struct macb_stats *hwstat = &bp->hw_stats.macb;
1965
1966	if (macb_is_gem(bp))
1967		return gem_get_stats(bp);
1968
1969	/* read stats from hardware */
1970	macb_update_stats(bp);
1971
1972	/* Convert HW stats into netdevice stats */
1973	nstat->rx_errors = (hwstat->rx_fcs_errors +
1974			    hwstat->rx_align_errors +
1975			    hwstat->rx_resource_errors +
1976			    hwstat->rx_overruns +
1977			    hwstat->rx_oversize_pkts +
1978			    hwstat->rx_jabbers +
1979			    hwstat->rx_undersize_pkts +
1980			    hwstat->rx_length_mismatch);
1981	nstat->tx_errors = (hwstat->tx_late_cols +
1982			    hwstat->tx_excessive_cols +
1983			    hwstat->tx_underruns +
1984			    hwstat->tx_carrier_errors +
1985			    hwstat->sqe_test_errors);
1986	nstat->collisions = (hwstat->tx_single_cols +
1987			     hwstat->tx_multiple_cols +
1988			     hwstat->tx_excessive_cols);
1989	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1990				   hwstat->rx_jabbers +
1991				   hwstat->rx_undersize_pkts +
1992				   hwstat->rx_length_mismatch);
1993	nstat->rx_over_errors = hwstat->rx_resource_errors +
1994				   hwstat->rx_overruns;
1995	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
1996	nstat->rx_frame_errors = hwstat->rx_align_errors;
1997	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* rx_missed_errors is left unset: the MACB has no dedicated
	 * "missed frame" counter (resource errors are reported via
	 * rx_over_errors above).
	 */
1999	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2000	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2001	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Heartbeat and window errors are not reported separately */
2003
2004	return nstat;
2005}
2006
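/* ethtool get_settings: the link parameters are owned by the PHY, so
 * simply delegate to phylib.
 */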
2007static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2008{
2009	struct macb *bp = netdev_priv(dev);
2010	struct phy_device *phydev = bp->phy_dev;
2011
2012	if (!phydev)
2013		return -ENODEV;
2014
2015	return phy_ethtool_gset(phydev, cmd);
2016}
2017
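/* ethtool set_settings: likewise delegated to phylib */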
2018static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2019{
2020	struct macb *bp = netdev_priv(dev);
2021	struct phy_device *phydev = bp->phy_dev;
2022
2023	if (!phydev)
2024		return -ENODEV;
2025
2026	return phy_ethtool_sset(phydev, cmd);
2027}
2028
2029static int macb_get_regs_len(struct net_device *netdev)
2030{
2031	return MACB_GREGS_NBR * sizeof(u32);
2032}
2033
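/* ethtool register dump: snapshot a fixed set of MAC registers plus the
 * queue 0 TX ring head/tail pointers for debugging.
 */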
2034static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2035			  void *p)
2036{
2037	struct macb *bp = netdev_priv(dev);
2038	unsigned int tail, head;
2039	u32 *regs_buff = p;
2040
2041	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2042			| MACB_GREGS_VERSION;
2043
2044	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2045	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2046
2047	regs_buff[0]  = macb_readl(bp, NCR);
2048	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
2049	regs_buff[2]  = macb_readl(bp, NSR);
2050	regs_buff[3]  = macb_readl(bp, TSR);
2051	regs_buff[4]  = macb_readl(bp, RBQP);
2052	regs_buff[5]  = macb_readl(bp, TBQP);
2053	regs_buff[6]  = macb_readl(bp, RSR);
2054	regs_buff[7]  = macb_readl(bp, IMR);
2055
2056	regs_buff[8]  = tail;
2057	regs_buff[9]  = head;
2058	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2059	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2060
2061	regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
2065}
2066
2067static const struct ethtool_ops macb_ethtool_ops = {
2068	.get_settings		= macb_get_settings,
2069	.set_settings		= macb_set_settings,
2070	.get_regs_len		= macb_get_regs_len,
2071	.get_regs		= macb_get_regs,
2072	.get_link		= ethtool_op_get_link,
2073	.get_ts_info		= ethtool_op_get_ts_info,
2074};
2075
2076static const struct ethtool_ops gem_ethtool_ops = {
2077	.get_settings		= macb_get_settings,
2078	.set_settings		= macb_set_settings,
2079	.get_regs_len		= macb_get_regs_len,
2080	.get_regs		= macb_get_regs,
2081	.get_link		= ethtool_op_get_link,
2082	.get_ts_info		= ethtool_op_get_ts_info,
2083	.get_ethtool_stats	= gem_get_ethtool_stats,
2084	.get_strings		= gem_get_ethtool_strings,
2085	.get_sset_count		= gem_get_sset_count,
2086};
2087
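/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are forwarded to
 * the PHY layer via phy_mii_ioctl().
 */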
2088static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2089{
2090	struct macb *bp = netdev_priv(dev);
2091	struct phy_device *phydev = bp->phy_dev;
2092
2093	if (!netif_running(dev))
2094		return -EINVAL;
2095
2096	if (!phydev)
2097		return -ENODEV;
2098
2099	return phy_mii_ioctl(phydev, rq, cmd);
2100}
2101
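/* ndo_set_features: toggle TX/RX checksum offload in hardware when the
 * corresponding netdev features change, e.g. in response to
 * "ethtool -K <iface> tx off rx off" (interface name is only an example).
 */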
2102static int macb_set_features(struct net_device *netdev,
2103			     netdev_features_t features)
2104{
2105	struct macb *bp = netdev_priv(netdev);
2106	netdev_features_t changed = features ^ netdev->features;
2107
2108	/* TX checksum offload */
2109	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2110		u32 dmacfg;
2111
2112		dmacfg = gem_readl(bp, DMACFG);
2113		if (features & NETIF_F_HW_CSUM)
2114			dmacfg |= GEM_BIT(TXCOEN);
2115		else
2116			dmacfg &= ~GEM_BIT(TXCOEN);
2117		gem_writel(bp, DMACFG, dmacfg);
2118	}
2119
2120	/* RX checksum offload */
2121	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2122		u32 netcfg;
2123
2124		netcfg = gem_readl(bp, NCFGR);
		if ((features & NETIF_F_RXCSUM) &&
		    !(netdev->flags & IFF_PROMISC))
2127			netcfg |= GEM_BIT(RXCOEN);
2128		else
2129			netcfg &= ~GEM_BIT(RXCOEN);
2130		gem_writel(bp, NCFGR, netcfg);
2131	}
2132
2133	return 0;
2134}
2135
2136static const struct net_device_ops macb_netdev_ops = {
2137	.ndo_open		= macb_open,
2138	.ndo_stop		= macb_close,
2139	.ndo_start_xmit		= macb_start_xmit,
2140	.ndo_set_rx_mode	= macb_set_rx_mode,
2141	.ndo_get_stats		= macb_get_stats,
2142	.ndo_do_ioctl		= macb_ioctl,
2143	.ndo_validate_addr	= eth_validate_addr,
2144	.ndo_change_mtu		= eth_change_mtu,
2145	.ndo_set_mac_address	= eth_mac_addr,
2146#ifdef CONFIG_NET_POLL_CONTROLLER
2147	.ndo_poll_controller	= macb_poll_controller,
2148#endif
2149	.ndo_set_features	= macb_set_features,
2150};
2151
2152/*
2153 * Configure peripheral capabilities according to device tree
2154 * and integration options used
2155 */
2156static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
2157{
2158	u32 dcfg;
2159
2160	if (dt_conf)
2161		bp->caps = dt_conf->caps;
2162
2163	if (macb_is_gem_hw(bp->regs)) {
2164		bp->caps |= MACB_CAPS_MACB_IS_GEM;
2165
2166		dcfg = gem_readl(bp, DCFG1);
2167		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2168			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2169		dcfg = gem_readl(bp, DCFG2);
2170		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2171			bp->caps |= MACB_CAPS_FIFO_MODE;
2172	}
2173
2174	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
2175}
2176
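/* Discover which queues the hardware implements by reading GEM_DCFG6.
 * Illustrative example: a DCFG6 low byte of 0x06 yields queue_mask 0x07
 * and, provided MACB_MAX_QUEUES covers them, num_queues 3.
 */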
2177static void macb_probe_queues(void __iomem *mem,
2178			      unsigned int *queue_mask,
2179			      unsigned int *num_queues)
2180{
2181	unsigned int hw_q;
2182
2183	*queue_mask = 0x1;
2184	*num_queues = 1;
2185
	/* Is it MACB or GEM?
	 *
	 * We need to read directly from the hardware here because we are
	 * early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag set yet.
	 */
2192	if (!macb_is_gem_hw(mem))
2193		return;
2194
2195	/* bit 0 is never set but queue 0 always exists */
2196	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2197
2198	*queue_mask |= 0x1;
2199
2200	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2201		if (*queue_mask & (1 << hw_q))
2202			(*num_queues)++;
2203}
2204
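/* Acquire and enable the "pclk" and "hclk" clocks; "tx_clk" is optional
 * and is simply left NULL when the platform does not provide it.
 */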
2205static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
2206			 struct clk **hclk, struct clk **tx_clk)
2207{
2208	int err;
2209
2210	*pclk = devm_clk_get(&pdev->dev, "pclk");
2211	if (IS_ERR(*pclk)) {
2212		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
2214		return err;
2215	}
2216
2217	*hclk = devm_clk_get(&pdev->dev, "hclk");
2218	if (IS_ERR(*hclk)) {
2219		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
2221		return err;
2222	}
2223
2224	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2225	if (IS_ERR(*tx_clk))
2226		*tx_clk = NULL;
2227
2228	err = clk_prepare_enable(*pclk);
2229	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2231		return err;
2232	}
2233
2234	err = clk_prepare_enable(*hclk);
2235	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
2237		goto err_disable_pclk;
2238	}
2239
2240	err = clk_prepare_enable(*tx_clk);
2241	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2243		goto err_disable_hclk;
2244	}
2245
2246	return 0;
2247
2248err_disable_hclk:
2249	clk_disable_unprepare(*hclk);
2250
2251err_disable_pclk:
2252	clk_disable_unprepare(*pclk);
2253
2254	return err;
2255}
2256
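/* Per-IP initialisation for MACB/GEM: map the per-queue registers,
 * request the queue IRQs, select the MACB vs GEM helper routines,
 * advertise offload features and program USRIO/NCFGR.
 */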
2257static int macb_init(struct platform_device *pdev)
2258{
2259	struct net_device *dev = platform_get_drvdata(pdev);
2260	unsigned int hw_q, q;
2261	struct macb *bp = netdev_priv(dev);
2262	struct macb_queue *queue;
2263	int err;
2264	u32 val;
2265
	/* Set the queue register mapping once and for all: queue0 has a
	 * special register mapping, but we don't want to test the queue
	 * index and then compute the corresponding register offset at
	 * run time.
	 */
2270	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
2271		if (!(bp->queue_mask & (1 << hw_q)))
2272			continue;
2273
2274		queue = &bp->queues[q];
2275		queue->bp = bp;
2276		if (hw_q) {
2277			queue->ISR  = GEM_ISR(hw_q - 1);
2278			queue->IER  = GEM_IER(hw_q - 1);
2279			queue->IDR  = GEM_IDR(hw_q - 1);
2280			queue->IMR  = GEM_IMR(hw_q - 1);
2281			queue->TBQP = GEM_TBQP(hw_q - 1);
2282		} else {
2283			/* queue0 uses legacy registers */
2284			queue->ISR  = MACB_ISR;
2285			queue->IER  = MACB_IER;
2286			queue->IDR  = MACB_IDR;
2287			queue->IMR  = MACB_IMR;
2288			queue->TBQP = MACB_TBQP;
2289		}
2290
		/* Get the IRQ: here we use the Linux queue index, not the
		 * hardware queue index. The per-queue IRQ entries in the
		 * device tree must therefore be contiguous, without the
		 * optional gaps that may exist in the hardware queue mask.
		 */
2296		queue->irq = platform_get_irq(pdev, q);
2297		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
2298				       IRQF_SHARED, dev->name, queue);
2299		if (err) {
2300			dev_err(&pdev->dev,
2301				"Unable to request IRQ %d (error %d)\n",
2302				queue->irq, err);
2303			return err;
2304		}
2305
2306		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
2307		q++;
2308	}
2309
2310	dev->netdev_ops = &macb_netdev_ops;
2311	netif_napi_add(dev, &bp->napi, macb_poll, 64);
2312
	/* Set up the appropriate routines according to the adapter type */
2314	if (macb_is_gem(bp)) {
2315		bp->max_tx_length = GEM_MAX_TX_LEN;
2316		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2317		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2318		bp->macbgem_ops.mog_init_rings = gem_init_rings;
2319		bp->macbgem_ops.mog_rx = gem_rx;
2320		dev->ethtool_ops = &gem_ethtool_ops;
2321	} else {
2322		bp->max_tx_length = MACB_MAX_TX_LEN;
2323		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2324		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2325		bp->macbgem_ops.mog_init_rings = macb_init_rings;
2326		bp->macbgem_ops.mog_rx = macb_rx;
2327		dev->ethtool_ops = &macb_ethtool_ops;
2328	}
2329
2330	/* Set features */
2331	dev->hw_features = NETIF_F_SG;
2332	/* Checksum offload is only available on gem with packet buffer */
2333	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
2334		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2335	if (bp->caps & MACB_CAPS_SG_DISABLED)
2336		dev->hw_features &= ~NETIF_F_SG;
2337	dev->features = dev->hw_features;
2338
2339	val = 0;
2340	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2341		val = GEM_BIT(RGMII);
2342	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
2343		 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
2344		val = MACB_BIT(RMII);
2345	else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
2346		val = MACB_BIT(MII);
2347
2348	if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
2349		val |= MACB_BIT(CLKEN);
2350
2351	macb_or_gem_writel(bp, USRIO, val);
2352
2353	/* Set MII management clock divider */
2354	val = macb_mdc_clk_div(bp);
2355	val |= macb_dbw(bp);
2356	macb_writel(bp, NCFGR, val);
2357
2358	return 0;
2359}
2360
2361#if defined(CONFIG_OF)
2362/* 1518 rounded up */
2363#define AT91ETHER_MAX_RBUFF_SZ	0x600
2364/* max number of receive buffers */
2365#define AT91ETHER_MAX_RX_DESCR	9
2366
2367/* Initialize and start the Receiver and Transmit subsystems */
2368static int at91ether_start(struct net_device *dev)
2369{
2370	struct macb *lp = netdev_priv(dev);
2371	dma_addr_t addr;
2372	u32 ctl;
2373	int i;
2374
2375	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2376					 (AT91ETHER_MAX_RX_DESCR *
2377					  sizeof(struct macb_dma_desc)),
2378					 &lp->rx_ring_dma, GFP_KERNEL);
2379	if (!lp->rx_ring)
2380		return -ENOMEM;
2381
2382	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
2383					    AT91ETHER_MAX_RX_DESCR *
2384					    AT91ETHER_MAX_RBUFF_SZ,
2385					    &lp->rx_buffers_dma, GFP_KERNEL);
2386	if (!lp->rx_buffers) {
2387		dma_free_coherent(&lp->pdev->dev,
2388				  AT91ETHER_MAX_RX_DESCR *
2389				  sizeof(struct macb_dma_desc),
2390				  lp->rx_ring, lp->rx_ring_dma);
2391		lp->rx_ring = NULL;
2392		return -ENOMEM;
2393	}
2394
2395	addr = lp->rx_buffers_dma;
2396	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2397		lp->rx_ring[i].addr = addr;
2398		lp->rx_ring[i].ctrl = 0;
2399		addr += AT91ETHER_MAX_RBUFF_SZ;
2400	}
2401
2402	/* Set the Wrap bit on the last descriptor */
2403	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
2404
2405	/* Reset buffer index */
2406	lp->rx_tail = 0;
2407
2408	/* Program address of descriptor list in Rx Buffer Queue register */
2409	macb_writel(lp, RBQP, lp->rx_ring_dma);
2410
2411	/* Enable Receive and Transmit */
2412	ctl = macb_readl(lp, NCR);
2413	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
2414
2415	return 0;
2416}
2417
2418/* Open the ethernet interface */
2419static int at91ether_open(struct net_device *dev)
2420{
2421	struct macb *lp = netdev_priv(dev);
2422	u32 ctl;
2423	int ret;
2424
2425	/* Clear internal statistics */
2426	ctl = macb_readl(lp, NCR);
2427	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2428
2429	macb_set_hwaddr(lp);
2430
2431	ret = at91ether_start(dev);
2432	if (ret)
2433		return ret;
2434
2435	/* Enable MAC interrupts */
2436	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
2437			     MACB_BIT(RXUBR)	|
2438			     MACB_BIT(ISR_TUND)	|
2439			     MACB_BIT(ISR_RLE)	|
2440			     MACB_BIT(TCOMP)	|
2441			     MACB_BIT(ISR_ROVR)	|
2442			     MACB_BIT(HRESP));
2443
2444	/* schedule a link state check */
2445	phy_start(lp->phy_dev);
2446
2447	netif_start_queue(dev);
2448
2449	return 0;
2450}
2451
2452/* Close the interface */
2453static int at91ether_close(struct net_device *dev)
2454{
2455	struct macb *lp = netdev_priv(dev);
2456	u32 ctl;
2457
2458	/* Disable Receiver and Transmitter */
2459	ctl = macb_readl(lp, NCR);
2460	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
2461
2462	/* Disable MAC interrupts */
2463	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
2464			     MACB_BIT(RXUBR)	|
2465			     MACB_BIT(ISR_TUND)	|
2466			     MACB_BIT(ISR_RLE)	|
2467			     MACB_BIT(TCOMP)	|
2468			     MACB_BIT(ISR_ROVR) |
2469			     MACB_BIT(HRESP));
2470
2471	netif_stop_queue(dev);
2472
2473	dma_free_coherent(&lp->pdev->dev,
2474			  AT91ETHER_MAX_RX_DESCR *
2475			  sizeof(struct macb_dma_desc),
2476			  lp->rx_ring, lp->rx_ring_dma);
2477	lp->rx_ring = NULL;
2478
2479	dma_free_coherent(&lp->pdev->dev,
2480			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
2481			  lp->rx_buffers, lp->rx_buffers_dma);
2482	lp->rx_buffers = NULL;
2483
2484	return 0;
2485}
2486
2487/* Transmit packet */
2488static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2489{
2490	struct macb *lp = netdev_priv(dev);
2491
2492	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
2493		netif_stop_queue(dev);
2494
2495		/* Store packet information (to free when Tx completed) */
2496		lp->skb = skb;
2497		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
2500
2501		/* Set address of the data in the Transmit Address register */
2502		macb_writel(lp, TAR, lp->skb_physaddr);
2503		/* Set length of the packet in the Transmit Control register */
2504		macb_writel(lp, TCR, skb->len);
2505
2506	} else {
2507		netdev_err(dev, "%s called, but device is busy!\n", __func__);
2508		return NETDEV_TX_BUSY;
2509	}
2510
2511	return NETDEV_TX_OK;
2512}
2513
/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context.)
 */
2517static void at91ether_rx(struct net_device *dev)
2518{
2519	struct macb *lp = netdev_priv(dev);
2520	unsigned char *p_recv;
2521	struct sk_buff *skb;
2522	unsigned int pktlen;
2523
2524	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
2525		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2526		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
2527		skb = netdev_alloc_skb(dev, pktlen + 2);
2528		if (skb) {
2529			skb_reserve(skb, 2);
2530			memcpy(skb_put(skb, pktlen), p_recv, pktlen);
2531
2532			skb->protocol = eth_type_trans(skb, dev);
2533			lp->stats.rx_packets++;
2534			lp->stats.rx_bytes += pktlen;
2535			netif_rx(skb);
2536		} else {
2537			lp->stats.rx_dropped++;
2538		}
2539
2540		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
2541			lp->stats.multicast++;
2542
2543		/* reset ownership bit */
2544		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
2545
2546		/* wrap after last buffer */
2547		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2548			lp->rx_tail = 0;
2549		else
2550			lp->rx_tail++;
2551	}
2552}
2553
2554/* MAC interrupt handler */
2555static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2556{
2557	struct net_device *dev = dev_id;
2558	struct macb *lp = netdev_priv(dev);
2559	u32 intstatus, ctl;
2560
2561	/* MAC Interrupt Status register indicates what interrupts are pending.
2562	 * It is automatically cleared once read.
2563	 */
2564	intstatus = macb_readl(lp, ISR);
2565
2566	/* Receive complete */
2567	if (intstatus & MACB_BIT(RCOMP))
2568		at91ether_rx(dev);
2569
2570	/* Transmit complete */
2571	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOMP bit is set even if the transmission failed */
2573		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
2574			lp->stats.tx_errors++;
2575
		if (lp->skb) {
			/* Unmap the buffer before handing the skb back */
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->skb_length;
		}
2584		netif_wake_queue(dev);
2585	}
2586
2587	/* Work-around for EMAC Errata section 41.3.1 */
2588	if (intstatus & MACB_BIT(RXUBR)) {
2589		ctl = macb_readl(lp, NCR);
2590		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2591		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2592	}
2593
2594	if (intstatus & MACB_BIT(ISR_ROVR))
2595		netdev_err(dev, "ROVR error\n");
2596
2597	return IRQ_HANDLED;
2598}
2599
2600#ifdef CONFIG_NET_POLL_CONTROLLER
2601static void at91ether_poll_controller(struct net_device *dev)
2602{
2603	unsigned long flags;
2604
2605	local_irq_save(flags);
2606	at91ether_interrupt(dev->irq, dev);
2607	local_irq_restore(flags);
2608}
2609#endif
2610
2611static const struct net_device_ops at91ether_netdev_ops = {
2612	.ndo_open		= at91ether_open,
2613	.ndo_stop		= at91ether_close,
2614	.ndo_start_xmit		= at91ether_start_xmit,
2615	.ndo_get_stats		= macb_get_stats,
2616	.ndo_set_rx_mode	= macb_set_rx_mode,
2617	.ndo_set_mac_address	= eth_mac_addr,
2618	.ndo_do_ioctl		= macb_ioctl,
2619	.ndo_validate_addr	= eth_validate_addr,
2620	.ndo_change_mtu		= eth_change_mtu,
2621#ifdef CONFIG_NET_POLL_CONTROLLER
2622	.ndo_poll_controller	= at91ether_poll_controller,
2623#endif
2624};
2625
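/* The RM9200 EMAC is clocked by a single "ether_clk"; hclk and tx_clk do
 * not exist on this IP and are left NULL.
 */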
2626static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
2627			      struct clk **hclk, struct clk **tx_clk)
2628{
2629	int err;
2630
2631	*hclk = NULL;
2632	*tx_clk = NULL;
2633
2634	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
2635	if (IS_ERR(*pclk))
2636		return PTR_ERR(*pclk);
2637
2638	err = clk_prepare_enable(*pclk);
2639	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
2641		return err;
2642	}
2643
2644	return 0;
2645}
2646
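/* Per-IP initialisation for the RM9200 EMAC: install the at91ether
 * netdev/ethtool ops, request the single IRQ and set the basic NCFGR
 * configuration.
 */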
2647static int at91ether_init(struct platform_device *pdev)
2648{
2649	struct net_device *dev = platform_get_drvdata(pdev);
2650	struct macb *bp = netdev_priv(dev);
2651	int err;
2652	u32 reg;
2653
2654	dev->netdev_ops = &at91ether_netdev_ops;
2655	dev->ethtool_ops = &macb_ethtool_ops;
2656
2657	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
2658			       0, dev->name, dev);
2659	if (err)
2660		return err;
2661
2662	macb_writel(bp, NCR, 0);
2663
2664	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2665	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2666		reg |= MACB_BIT(RM9200_RMII);
2667
2668	macb_writel(bp, NCFGR, reg);
2669
2670	return 0;
2671}
2672
2673static const struct macb_config at91sam9260_config = {
2674	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
2675	.clk_init = macb_clk_init,
2676	.init = macb_init,
2677};
2678
2679static const struct macb_config pc302gem_config = {
2680	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2681	.dma_burst_length = 16,
2682	.clk_init = macb_clk_init,
2683	.init = macb_init,
2684};
2685
2686static const struct macb_config sama5d3_config = {
2687	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2688	.dma_burst_length = 16,
2689	.clk_init = macb_clk_init,
2690	.init = macb_init,
2691};
2692
2693static const struct macb_config sama5d4_config = {
2694	.caps = 0,
2695	.dma_burst_length = 4,
2696	.clk_init = macb_clk_init,
2697	.init = macb_init,
2698};
2699
2700static const struct macb_config emac_config = {
2701	.clk_init = at91ether_clk_init,
2702	.init = at91ether_init,
2703};
2704
2705static const struct macb_config zynq_config = {
2706	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
2707		MACB_CAPS_NO_GIGABIT_HALF,
2708	.dma_burst_length = 16,
2709	.clk_init = macb_clk_init,
2710	.init = macb_init,
2711};
2712
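/* Device tree match table. As an illustration only (the node name, unit
 * address, interrupt specifier and clock phandles below are made up, not
 * taken from a specific board), a matching node could look like:
 *
 *	ethernet@f0028000 {
 *		compatible = "atmel,sama5d3-gem";
 *		reg = <0xf0028000 0x100>;
 *		interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
 *		clocks = <&macb_clk>, <&macb_clk>;
 *		clock-names = "hclk", "pclk";
 *		phy-mode = "rgmii";
 *	};
 */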
2713static const struct of_device_id macb_dt_ids[] = {
2714	{ .compatible = "cdns,at32ap7000-macb" },
2715	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
2716	{ .compatible = "cdns,macb" },
2717	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
2718	{ .compatible = "cdns,gem", .data = &pc302gem_config },
2719	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2720	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2721	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2722	{ .compatible = "cdns,emac", .data = &emac_config },
2723	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
2724	{ /* sentinel */ }
2725};
2726MODULE_DEVICE_TABLE(of, macb_dt_ids);
2727#endif /* CONFIG_OF */
2728
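/* Probe: enable the clocks, map the registers, discover the queue
 * layout, allocate the net_device, run the per-IP init() hook and
 * register the resulting interface and its MDIO bus.
 */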
2729static int macb_probe(struct platform_device *pdev)
2730{
2731	int (*clk_init)(struct platform_device *, struct clk **,
2732			struct clk **, struct clk **)
2733					      = macb_clk_init;
2734	int (*init)(struct platform_device *) = macb_init;
2735	struct device_node *np = pdev->dev.of_node;
2736	const struct macb_config *macb_config = NULL;
2737	struct clk *pclk, *hclk, *tx_clk;
2738	unsigned int queue_mask, num_queues;
2739	struct macb_platform_data *pdata;
2740	struct phy_device *phydev;
2741	struct net_device *dev;
2742	struct resource *regs;
2743	void __iomem *mem;
2744	const char *mac;
2745	struct macb *bp;
2746	int err;
2747
2748	if (np) {
2749		const struct of_device_id *match;
2750
2751		match = of_match_node(macb_dt_ids, np);
2752		if (match && match->data) {
2753			macb_config = match->data;
2754			clk_init = macb_config->clk_init;
2755			init = macb_config->init;
2756		}
2757	}
2758
2759	err = clk_init(pdev, &pclk, &hclk, &tx_clk);
2760	if (err)
2761		return err;
2762
2763	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2764	mem = devm_ioremap_resource(&pdev->dev, regs);
2765	if (IS_ERR(mem)) {
2766		err = PTR_ERR(mem);
2767		goto err_disable_clocks;
2768	}
2769
2770	macb_probe_queues(mem, &queue_mask, &num_queues);
2771	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2772	if (!dev) {
2773		err = -ENOMEM;
2774		goto err_disable_clocks;
2775	}
2776
2777	dev->base_addr = regs->start;
2778
2779	SET_NETDEV_DEV(dev, &pdev->dev);
2780
2781	bp = netdev_priv(dev);
2782	bp->pdev = pdev;
2783	bp->dev = dev;
2784	bp->regs = mem;
2785	bp->num_queues = num_queues;
2786	bp->queue_mask = queue_mask;
2787	if (macb_config)
2788		bp->dma_burst_length = macb_config->dma_burst_length;
2789	bp->pclk = pclk;
2790	bp->hclk = hclk;
2791	bp->tx_clk = tx_clk;
2792	spin_lock_init(&bp->lock);
2793
2794	/* setup capabilities */
2795	macb_configure_caps(bp, macb_config);
2796
2797	platform_set_drvdata(pdev, dev);
2798
2799	dev->irq = platform_get_irq(pdev, 0);
2800	if (dev->irq < 0) {
2801		err = dev->irq;
		goto err_out_free_netdev;
2803	}
2804
2805	mac = of_get_mac_address(np);
2806	if (mac)
2807		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
2808	else
2809		macb_get_hwaddr(bp);
2810
2811	err = of_get_phy_mode(np);
2812	if (err < 0) {
2813		pdata = dev_get_platdata(&pdev->dev);
2814		if (pdata && pdata->is_rmii)
2815			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
2816		else
2817			bp->phy_interface = PHY_INTERFACE_MODE_MII;
2818	} else {
2819		bp->phy_interface = err;
2820	}
2821
2822	/* IP specific init */
2823	err = init(pdev);
2824	if (err)
2825		goto err_out_free_netdev;
2826
2827	err = register_netdev(dev);
2828	if (err) {
2829		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_netdev;
2831	}
2832
2833	err = macb_mii_init(bp);
2834	if (err)
2835		goto err_out_unregister_netdev;
2836
2837	netif_carrier_off(dev);
2838
2839	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
2840		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
2841		    dev->base_addr, dev->irq, dev->dev_addr);
2842
2843	phydev = bp->phy_dev;
2844	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
2845		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
2846
2847	return 0;
2848
2849err_out_unregister_netdev:
2850	unregister_netdev(dev);
2851
2852err_out_free_netdev:
2853	free_netdev(dev);
2854
2855err_disable_clocks:
2856	clk_disable_unprepare(tx_clk);
2857	clk_disable_unprepare(hclk);
2858	clk_disable_unprepare(pclk);
2859
2860	return err;
2861}
2862
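/* Remove: tear down in roughly the reverse order of probe: PHY, MDIO
 * bus, netdev, clocks.
 */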
2863static int macb_remove(struct platform_device *pdev)
2864{
2865	struct net_device *dev;
2866	struct macb *bp;
2867
2868	dev = platform_get_drvdata(pdev);
2869
2870	if (dev) {
2871		bp = netdev_priv(dev);
2872		if (bp->phy_dev)
2873			phy_disconnect(bp->phy_dev);
2874		mdiobus_unregister(bp->mii_bus);
2875		kfree(bp->mii_bus->irq);
2876		mdiobus_free(bp->mii_bus);
2877		unregister_netdev(dev);
2878		clk_disable_unprepare(bp->tx_clk);
2879		clk_disable_unprepare(bp->hclk);
2880		clk_disable_unprepare(bp->pclk);
2881		free_netdev(dev);
2882	}
2883
2884	return 0;
2885}
2886
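/* Suspend: detach the interface and gate all clocks to save power */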
2887static int __maybe_unused macb_suspend(struct device *dev)
2888{
2889	struct platform_device *pdev = to_platform_device(dev);
2890	struct net_device *netdev = platform_get_drvdata(pdev);
2891	struct macb *bp = netdev_priv(netdev);
2892
2893	netif_carrier_off(netdev);
2894	netif_device_detach(netdev);
2895
2896	clk_disable_unprepare(bp->tx_clk);
2897	clk_disable_unprepare(bp->hclk);
2898	clk_disable_unprepare(bp->pclk);
2899
2900	return 0;
2901}
2902
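/* Resume: ungate the clocks and reattach the interface */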
2903static int __maybe_unused macb_resume(struct device *dev)
2904{
2905	struct platform_device *pdev = to_platform_device(dev);
2906	struct net_device *netdev = platform_get_drvdata(pdev);
2907	struct macb *bp = netdev_priv(netdev);
2908
2909	clk_prepare_enable(bp->pclk);
2910	clk_prepare_enable(bp->hclk);
2911	clk_prepare_enable(bp->tx_clk);
2912
2913	netif_device_attach(netdev);
2914
2915	return 0;
2916}
2917
2918static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
2919
2920static struct platform_driver macb_driver = {
2921	.probe		= macb_probe,
2922	.remove		= macb_remove,
2923	.driver		= {
2924		.name		= "macb",
2925		.of_match_table	= of_match_ptr(macb_dt_ids),
2926		.pm	= &macb_pm_ops,
2927	},
2928};
2929
2930module_platform_driver(macb_driver);
2931
2932MODULE_LICENSE("GPL");
2933MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
2934MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2935MODULE_ALIAS("platform:macb");
2936