1 /*
2  * Blackfin On-Chip MAC Driver
3  *
4  * Copyright 2004-2010 Analog Devices Inc.
5  *
6  * Enter bugs at http://blackfin.uclinux.org/
7  *
8  * Licensed under the GPL-2 or later.
9  */
10 
11 #define DRV_VERSION	"1.1"
12 #define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/irq.h>
25 #include <linux/io.h>
26 #include <linux/ioport.h>
27 #include <linux/crc32.h>
28 #include <linux/device.h>
29 #include <linux/spinlock.h>
30 #include <linux/mii.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/skbuff.h>
35 #include <linux/platform_device.h>
36 
37 #include <asm/dma.h>
38 #include <linux/dma-mapping.h>
39 
40 #include <asm/div64.h>
41 #include <asm/dpmc.h>
42 #include <asm/blackfin.h>
43 #include <asm/cacheflush.h>
44 #include <asm/portmux.h>
45 #include <mach/pll.h>
46 
47 #include "bfin_mac.h"
48 
49 MODULE_AUTHOR("Bryan Wu, Luke Yang");
50 MODULE_LICENSE("GPL");
51 MODULE_DESCRIPTION(DRV_DESC);
52 MODULE_ALIAS("platform:bfin_mac");
53 
54 #if defined(CONFIG_BFIN_MAC_USE_L1)
55 # define bfin_mac_alloc(dma_handle, size, num)  l1_data_sram_zalloc(size*num)
56 # define bfin_mac_free(dma_handle, ptr, num)    l1_data_sram_free(ptr)
57 #else
58 # define bfin_mac_alloc(dma_handle, size, num) \
59 	dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60 # define bfin_mac_free(dma_handle, ptr, num) \
61 	dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62 #endif
63 
64 #define PKT_BUF_SZ 1580
65 
66 #define MAX_TIMEOUT_CNT	500
67 
68 /* pointers to maintain transmit list */
69 static struct net_dma_desc_tx *tx_list_head;
70 static struct net_dma_desc_tx *tx_list_tail;
71 static struct net_dma_desc_rx *rx_list_head;
72 static struct net_dma_desc_rx *rx_list_tail;
73 static struct net_dma_desc_rx *current_rx_ptr;
74 static struct net_dma_desc_tx *current_tx_ptr;
75 static struct net_dma_desc_tx *tx_desc;
76 static struct net_dma_desc_rx *rx_desc;
77 
desc_list_free(void)78 static void desc_list_free(void)
79 {
80 	struct net_dma_desc_rx *r;
81 	struct net_dma_desc_tx *t;
82 	int i;
83 #if !defined(CONFIG_BFIN_MAC_USE_L1)
84 	dma_addr_t dma_handle = 0;
85 #endif
86 
87 	if (tx_desc) {
88 		t = tx_list_head;
89 		for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
90 			if (t) {
91 				if (t->skb) {
92 					dev_kfree_skb(t->skb);
93 					t->skb = NULL;
94 				}
95 				t = t->next;
96 			}
97 		}
98 		bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 	}
100 
101 	if (rx_desc) {
102 		r = rx_list_head;
103 		for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
104 			if (r) {
105 				if (r->skb) {
106 					dev_kfree_skb(r->skb);
107 					r->skb = NULL;
108 				}
109 				r = r->next;
110 			}
111 		}
112 		bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 	}
114 }
115 
/*
 * desc_list_init - build the circular TX and RX DMA descriptor rings
 * @dev: net device, used to account the pre-allocated receive skbs
 *
 * Allocates CONFIG_BFIN_TX_DESC_NUM transmit and CONFIG_BFIN_RX_DESC_NUM
 * receive descriptors, links each descriptor's desc_a (payload transfer)
 * to its desc_b (status write-back), chains the descriptors into two
 * circular lists, and attaches a fresh skb to every RX descriptor.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via desc_list_free().
 */
static int desc_list_init(struct net_device *dev)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real dma handler is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx),
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx),
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * desc_a moves the packet payload out of memory:
		 * disable DMA
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * desc_b writes completion status back into t->status:
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		/* link previous tail's status descriptor to this entry */
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
		if (!new_skb)
			goto init_error;

		skb_reserve(new_skb, NET_IP_ALIGN);
		/* Invalidate the data cache over the skb buffer when it is
		 * write-back cached, to prevent stale cache lines from being
		 * written back over the new data delivered by DMA.
		 */
		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);
		r->skb = new_skb;

		/*
		 * desc_a receives the frame payload:
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * desc_b writes RX status and raises the RX interrupt:
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	pr_err("kmalloc failed\n");
	return -ENOMEM;
}
244 
245 
246 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
247 
248 /*
249  * MII operations
250  */
251 /* Wait until the previous MDC/MDIO transaction has completed */
bfin_mdio_poll(void)252 static int bfin_mdio_poll(void)
253 {
254 	int timeout_cnt = MAX_TIMEOUT_CNT;
255 
256 	/* poll the STABUSY bit */
257 	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
258 		udelay(1);
259 		if (timeout_cnt-- < 0) {
260 			pr_err("wait MDC/MDIO transaction to complete timeout\n");
261 			return -ETIMEDOUT;
262 		}
263 	}
264 
265 	return 0;
266 }
267 
268 /* Read an off-chip register in a PHY through the MDC/MDIO port */
bfin_mdiobus_read(struct mii_bus * bus,int phy_addr,int regnum)269 static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
270 {
271 	int ret;
272 
273 	ret = bfin_mdio_poll();
274 	if (ret)
275 		return ret;
276 
277 	/* read mode */
278 	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
279 				SET_REGAD((u16) regnum) |
280 				STABUSY);
281 
282 	ret = bfin_mdio_poll();
283 	if (ret)
284 		return ret;
285 
286 	return (int) bfin_read_EMAC_STADAT();
287 }
288 
289 /* Write an off-chip register in a PHY through the MDC/MDIO port */
/*
 * Write an off-chip PHY register through the MDC/MDIO port.
 * Returns 0 on success or a negative errno on timeout.
 */
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
			      u16 value)
{
	int rc = bfin_mdio_poll();

	if (rc)
		return rc;

	/* load the data register first, then start a write transaction */
	bfin_write_EMAC_STADAT((u32) value);

	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
				SET_REGAD((u16) regnum) |
				STAOP |
				STABUSY);

	/* report completion (or timeout) of the write */
	return bfin_mdio_poll();
}
309 
/*
 * bfin_mac_adjust_link - phylib callback run on every PHY state change
 * @dev: net device whose PHY changed state
 *
 * Mirrors the PHY's negotiated duplex and (for RMII) speed into the
 * EMAC_OPMODE register, tracking the previous values in the driver
 * private data so registers are only rewritten on an actual change.
 * Runs under lp->lock with interrupts disabled since it touches both
 * cached state and hardware registers.
 */
static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
			/* RMII_10 only exists for the reduced-pin interface;
			 * plain MII needs no speed bit in OPMODE */
			if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
				u32 opmode = bfin_read_EMAC_OPMODE();
				switch (phydev->speed) {
				case 10:
					opmode |= RMII_10;
					break;
				case 100:
					opmode &= ~RMII_10;
					break;
				default:
					netdev_warn(dev,
						"Ack! Speed (%d) is not 10/100!\n",
						phydev->speed);
					break;
				}
				bfin_write_EMAC_OPMODE(opmode);
			}

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		/* link just went down: reset the cached negotiation state */
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
376 
377 /* MDC  = 2.5 MHz */
378 #define MDC_CLK 2500000
379 
/*
 * mii_probe - locate and attach the PHY behind the MDIO bus
 * @dev: net device to bind the PHY to
 * @phy_mode: expected interface, PHY_INTERFACE_MODE_MII or _RMII
 *
 * Programs the MDC clock divider for ~2.5 MHz, scans the bus for the
 * first responding PHY, connects it with bfin_mac_adjust_link() as the
 * link-change handler, and masks its capabilities down to what the EMAC
 * supports (10/100, half/full duplex).
 *
 * Returns 0 on success, -ENODEV if no PHY answers, -EINVAL for an
 * unsupported phy_mode, or the phy_connect() error.
 */
static int mii_probe(struct net_device *dev, int phy_mode)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	unsigned short sysctl;
	int i;
	u32 sclk, mdc_div;

	/* Enable PHY output early */
	if (!(bfin_read_VR_CTL() & CLKBUFOE))
		bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);

	/* derive the MDC divider from the system clock (MDC ~= 2.5 MHz) */
	sclk = get_sclk();
	mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
	bfin_write_EMAC_SYSCTL(sysctl);

	/* search for connected PHY device */
	for (i = 0; i < PHY_MAX_ADDR; ++i) {
		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];

		if (!tmp_phydev)
			continue; /* no PHY here... */

		phydev = tmp_phydev;
		break; /* found it */
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	if (!phydev) {
		netdev_err(dev, "no phy device found\n");
		return -ENODEV;
	}

	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
		phy_mode != PHY_INTERFACE_MODE_MII) {
		netdev_err(dev, "invalid phy interface mode\n");
		return -EINVAL;
	}

	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &bfin_mac_adjust_link, phy_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "could not attach PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_Pause | SUPPORTED_Asym_Pause
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	/* start from "link down"; adjust_link fills these in later */
	lp->old_link = 0;
	lp->old_speed = 0;
	lp->old_duplex = -1;
	lp->phydev = phydev;

	pr_info("attached PHY driver [%s] "
	        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
	        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
	        MDC_CLK, mdc_div, sclk/1000000);

	return 0;
}
454 
455 /*
456  * Ethtool support
457  */
458 
459 /*
460  * interrupt routine for magic packet wakeup
461  */
static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
{
	/* The wake event itself is the whole point; nothing else to do. */
	return IRQ_HANDLED;
}
466 
467 static int
bfin_mac_ethtool_getsettings(struct net_device * dev,struct ethtool_cmd * cmd)468 bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
469 {
470 	struct bfin_mac_local *lp = netdev_priv(dev);
471 
472 	if (lp->phydev)
473 		return phy_ethtool_gset(lp->phydev, cmd);
474 
475 	return -EINVAL;
476 }
477 
478 static int
bfin_mac_ethtool_setsettings(struct net_device * dev,struct ethtool_cmd * cmd)479 bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
480 {
481 	struct bfin_mac_local *lp = netdev_priv(dev);
482 
483 	if (!capable(CAP_NET_ADMIN))
484 		return -EPERM;
485 
486 	if (lp->phydev)
487 		return phy_ethtool_sset(lp->phydev, cmd);
488 
489 	return -EINVAL;
490 }
491 
bfin_mac_ethtool_getdrvinfo(struct net_device * dev,struct ethtool_drvinfo * info)492 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
493 					struct ethtool_drvinfo *info)
494 {
495 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
496 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
497 	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
498 	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
499 }
500 
bfin_mac_ethtool_getwol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)501 static void bfin_mac_ethtool_getwol(struct net_device *dev,
502 	struct ethtool_wolinfo *wolinfo)
503 {
504 	struct bfin_mac_local *lp = netdev_priv(dev);
505 
506 	wolinfo->supported = WAKE_MAGIC;
507 	wolinfo->wolopts = lp->wol;
508 }
509 
bfin_mac_ethtool_setwol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)510 static int bfin_mac_ethtool_setwol(struct net_device *dev,
511 	struct ethtool_wolinfo *wolinfo)
512 {
513 	struct bfin_mac_local *lp = netdev_priv(dev);
514 	int rc;
515 
516 	if (wolinfo->wolopts & (WAKE_MAGICSECURE |
517 				WAKE_UCAST |
518 				WAKE_MCAST |
519 				WAKE_BCAST |
520 				WAKE_ARP))
521 		return -EOPNOTSUPP;
522 
523 	lp->wol = wolinfo->wolopts;
524 
525 	if (lp->wol && !lp->irq_wake_requested) {
526 		/* register wake irq handler */
527 		rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
528 				 0, "EMAC_WAKE", dev);
529 		if (rc)
530 			return rc;
531 		lp->irq_wake_requested = true;
532 	}
533 
534 	if (!lp->wol && lp->irq_wake_requested) {
535 		free_irq(IRQ_MAC_WAKEDET, dev);
536 		lp->irq_wake_requested = false;
537 	}
538 
539 	/* Make sure the PHY driver doesn't suspend */
540 	device_init_wakeup(&dev->dev, lp->wol);
541 
542 	return 0;
543 }
544 
545 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
bfin_mac_ethtool_get_ts_info(struct net_device * dev,struct ethtool_ts_info * info)546 static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
547 	struct ethtool_ts_info *info)
548 {
549 	struct bfin_mac_local *lp = netdev_priv(dev);
550 
551 	info->so_timestamping =
552 		SOF_TIMESTAMPING_TX_HARDWARE |
553 		SOF_TIMESTAMPING_RX_HARDWARE |
554 		SOF_TIMESTAMPING_RAW_HARDWARE;
555 	info->phc_index = lp->phc_index;
556 	info->tx_types =
557 		(1 << HWTSTAMP_TX_OFF) |
558 		(1 << HWTSTAMP_TX_ON);
559 	info->rx_filters =
560 		(1 << HWTSTAMP_FILTER_NONE) |
561 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
562 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
563 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
564 	return 0;
565 }
566 #endif
567 
/* ethtool entry points; ts_info only when HW timestamping is built in */
static const struct ethtool_ops bfin_mac_ethtool_ops = {
	.get_settings = bfin_mac_ethtool_getsettings,
	.set_settings = bfin_mac_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
	.get_wol = bfin_mac_ethtool_getwol,
	.set_wol = bfin_mac_ethtool_setwol,
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
	.get_ts_info = bfin_mac_ethtool_get_ts_info,
#endif
};
579 
580 /**************************************************************************/
/*
 * setup_system_regs - one-time EMAC and DMA channel register setup
 * @dev: net device whose MII bus decides whether PHYIE is needed
 *
 * Configures SYSCTL (PHY interrupt enable, RX DWA alignment, optional RX
 * checksum offload), resets the MMC counters, programs the VLAN length
 * registers, and initializes the TX (DMA2) / RX (DMA1) channel registers
 * for 32-bit descriptor-driven operation.
 */
static void setup_system_regs(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int i;
	unsigned short sysctl;

	/*
	 * Odd word alignment for Receive Frame DMA word
	 * Configure checksum support and rcve frame word alignment
	 */
	sysctl = bfin_read_EMAC_SYSCTL();
	/*
	 * check if interrupt is requested for any PHY,
	 * enable PHY interrupt only if needed
	 */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		if (lp->mii_bus->irq[i] != PHY_POLL)
			break;
	if (i < PHY_MAX_ADDR)
		sysctl |= PHYIE;
	sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	sysctl |= RXCKS;
#else
	sysctl &= ~RXCKS;
#endif
	bfin_write_EMAC_SYSCTL(sysctl);

	/* reset and roll over the MMC statistics counters */
	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);

	/* Set vlan regs to let 1522 bytes long packets pass through */
	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
	bfin_write_EMAC_VLAN2(lp->vlan2_mask);

	/* Initialize the TX DMA channel registers */
	bfin_write_DMA2_X_COUNT(0);
	bfin_write_DMA2_X_MODIFY(4);
	bfin_write_DMA2_Y_COUNT(0);
	bfin_write_DMA2_Y_MODIFY(0);

	/* Initialize the RX DMA channel registers */
	bfin_write_DMA1_X_COUNT(0);
	bfin_write_DMA1_X_MODIFY(4);
	bfin_write_DMA1_Y_COUNT(0);
	bfin_write_DMA1_Y_MODIFY(0);
}
627 
setup_mac_addr(u8 * mac_addr)628 static void setup_mac_addr(u8 *mac_addr)
629 {
630 	u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
631 	u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
632 
633 	/* this depends on a little-endian machine */
634 	bfin_write_EMAC_ADDRLO(addr_low);
635 	bfin_write_EMAC_ADDRHI(addr_hi);
636 }
637 
bfin_mac_set_mac_address(struct net_device * dev,void * p)638 static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
639 {
640 	struct sockaddr *addr = p;
641 	if (netif_running(dev))
642 		return -EBUSY;
643 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
644 	setup_mac_addr(dev->dev_addr);
645 	return 0;
646 }
647 
648 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
649 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
650 
/*
 * Pick the PHC counter frequency for a given input clock.
 * Finds the smallest power-of-two period (in ns) strictly greater than
 * one input-clock tick; stores log2 of that period in *shift_result and
 * returns the resulting clock frequency in Hz.
 */
static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
{
	u32 tick_ns = 1000000000UL / input_clk;
	u32 period_ns = 1;
	unsigned int bits = 0;

	while (period_ns <= tick_ns) {
		period_ns <<= 1;
		bits++;
	}

	*shift_result = bits;
	return 1000000000UL / period_ns;
}
664 
/*
 * bfin_mac_hwtstamp_set - SIOCSHWTSTAMP handler
 * @netdev: device being configured
 * @ifr: ioctl request carrying a struct hwtstamp_config
 *
 * Validates the requested config, programs the EMAC PTP comparison mask
 * (EMAC_PTP_CTL bits [12:8]), frame-offset (FOFF) and field-value
 * (FV1..FV3) registers for the selected RX filter class, enables or
 * disables the PTP engine, and copies the (possibly coarsened) config
 * back to userspace. Each filter case widens config.rx_filter to the
 * corresponding *_EVENT class, which is what the hardware implements.
 *
 * Returns 0, -EFAULT on bad user pointers, -EINVAL for unknown flags,
 * or -ERANGE for unsupported tx_type/rx_filter values.
 */
static int bfin_mac_hwtstamp_set(struct net_device *netdev,
				 struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u16 ptpctl;
	u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
			__func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if ((config.tx_type != HWTSTAMP_TX_OFF) &&
			(config.tx_type != HWTSTAMP_TX_ON))
		return -ERANGE;

	ptpctl = bfin_read_EMAC_PTP_CTL();

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/*
		 * Dont allow any timestamping
		 */
		ptpfv3 = 0xFFFFFFFF;
		bfin_write_EMAC_PTP_FV3(ptpfv3);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/*
		 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
		 * to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register.
		 */
		ptpfoff = 0x4A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * The default value (0xFFFC) allows the timestamping of both
		 * received Sync messages and Delay_Req messages.
		 */
		ptpfv3 = 0xFFFFFFFC;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Clear all five comparison mask bits (bits[12:8]) in the
		 * EMAC_PTP_CTL register to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register, except set
		 * the PTPCOF field to 0x2A.
		 */
		ptpfoff = 0x2A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
		 * the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/*
		 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
		 * EFTM and PTPCM field comparison.
		 */
		ptpctl &= ~0x1100;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of all the fields of the EMAC_PTP_FOFF
		 * register, except set the PTPCOF field to 0x0E.
		 */
		ptpfoff = 0x0E24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
		 * corresponds to PTP messages on the MAC layer.
		 */
		ptpfv1 = 0x110488F7;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
		 * messages, set the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (config.tx_type == HWTSTAMP_TX_OFF &&
	    bfin_mac_hwtstamp_is_none(config.rx_filter)) {
		/* neither direction wants stamps: power the PTP engine down */
		ptpctl &= ~PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		SSYNC();
	} else {
		ptpctl |= PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		/*
		 * clear any existing timestamp
		 */
		bfin_read_EMAC_PTP_RXSNAPLO();
		bfin_read_EMAC_PTP_RXSNAPHI();

		bfin_read_EMAC_PTP_TXSNAPLO();
		bfin_read_EMAC_PTP_TXSNAPHI();

		SSYNC();
	}

	lp->stamp_cfg = config;
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
821 
bfin_mac_hwtstamp_get(struct net_device * netdev,struct ifreq * ifr)822 static int bfin_mac_hwtstamp_get(struct net_device *netdev,
823 				 struct ifreq *ifr)
824 {
825 	struct bfin_mac_local *lp = netdev_priv(netdev);
826 
827 	return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
828 			    sizeof(lp->stamp_cfg)) ?
829 		-EFAULT : 0;
830 }
831 
bfin_tx_hwtstamp(struct net_device * netdev,struct sk_buff * skb)832 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
833 {
834 	struct bfin_mac_local *lp = netdev_priv(netdev);
835 
836 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
837 		int timeout_cnt = MAX_TIMEOUT_CNT;
838 
839 		/* When doing time stamping, keep the connection to the socket
840 		 * a while longer
841 		 */
842 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
843 
844 		/*
845 		 * The timestamping is done at the EMAC module's MII/RMII interface
846 		 * when the module sees the Start of Frame of an event message packet. This
847 		 * interface is the closest possible place to the physical Ethernet transmission
848 		 * medium, providing the best timing accuracy.
849 		 */
850 		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
851 			udelay(1);
852 		if (timeout_cnt == 0)
853 			netdev_err(netdev, "timestamp the TX packet failed\n");
854 		else {
855 			struct skb_shared_hwtstamps shhwtstamps;
856 			u64 ns;
857 			u64 regval;
858 
859 			regval = bfin_read_EMAC_PTP_TXSNAPLO();
860 			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
861 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
862 			ns = regval << lp->shift;
863 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
864 			skb_tstamp_tx(skb, &shhwtstamps);
865 		}
866 	}
867 }
868 
bfin_rx_hwtstamp(struct net_device * netdev,struct sk_buff * skb)869 static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
870 {
871 	struct bfin_mac_local *lp = netdev_priv(netdev);
872 	u32 valid;
873 	u64 regval, ns;
874 	struct skb_shared_hwtstamps *shhwtstamps;
875 
876 	if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
877 		return;
878 
879 	valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
880 	if (!valid)
881 		return;
882 
883 	shhwtstamps = skb_hwtstamps(skb);
884 
885 	regval = bfin_read_EMAC_PTP_RXSNAPLO();
886 	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
887 	ns = regval << lp->shift;
888 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
889 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
890 }
891 
/*
 * bfin_mac_hwtstamp_init - set up the PTP addend and default stamp config
 * @netdev: device whose private data holds the PHC parameters
 *
 * Chooses a power-of-two PHC frequency from the system clock, programs
 * the fractional ADDEND register (phc_clk/input_clk in 32.32 fixed
 * point), derives the maximum frequency adjustment in ppb, and defaults
 * to timestamping disabled.
 */
static void bfin_mac_hwtstamp_init(struct net_device *netdev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u64 addend, ppb;
	u32 input_clk, phc_clk;

	/* Initialize hardware timer */
	input_clk = get_sclk();
	phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
	/* addend = phc_clk / input_clk in 32.32 fixed point
	 * (do_div divides in place and is required for 64/32 on Blackfin) */
	addend = phc_clk * (1ULL << 32);
	do_div(addend, input_clk);
	bfin_write_EMAC_PTP_ADDEND((u32)addend);

	lp->addend = addend;
	/* max_ppb = (input_clk/phc_clk - 1) in parts per billion, minus 1 */
	ppb = 1000000000ULL * input_clk;
	do_div(ppb, phc_clk);
	lp->max_ppb = ppb - 1000000000ULL - 1ULL;

	/* Initialize hwstamp config */
	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
914 
bfin_ptp_time_read(struct bfin_mac_local * lp)915 static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
916 {
917 	u64 ns;
918 	u32 lo, hi;
919 
920 	lo = bfin_read_EMAC_PTP_TIMELO();
921 	hi = bfin_read_EMAC_PTP_TIMEHI();
922 
923 	ns = ((u64) hi) << 32;
924 	ns |= lo;
925 	ns <<= lp->shift;
926 
927 	return ns;
928 }
929 
/*
 * Load the PTP counter from a nanosecond value.
 * Caller holds lp->phc_lock.
 */
static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
{
	u64 ticks = ns >> lp->shift;	/* nanoseconds -> counter ticks */

	bfin_write_EMAC_PTP_TIMELO((u32) ticks);
	bfin_write_EMAC_PTP_TIMEHI((u32) (ticks >> 32));
}
941 
942 /* PTP Hardware Clock operations */
943 
/*
 * PHC .adjfreq: scale the nominal ADDEND by |ppb| parts per billion and
 * apply the delta in the requested direction.
 */
static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);
	u32 base, delta;
	u64 scaled;
	int negative = 0;

	if (ppb < 0) {
		negative = 1;
		ppb = -ppb;
	}

	base = lp->addend;
	/* delta = base * |ppb| / 1e9, computed in 64 bits */
	scaled = base;
	scaled *= ppb;
	delta = div_u64(scaled, 1000000000ULL);

	bfin_write_EMAC_PTP_ADDEND(negative ? base - delta : base + delta);

	return 0;
}
967 
/* PHC .adjtime: step the hardware clock by @delta nanoseconds. */
static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);
	unsigned long irqflags;
	s64 shifted;

	/* read-modify-write of the counter under the PHC lock */
	spin_lock_irqsave(&lp->phc_lock, irqflags);
	shifted = bfin_ptp_time_read(lp) + delta;
	bfin_ptp_time_write(lp, shifted);
	spin_unlock_irqrestore(&lp->phc_lock, irqflags);

	return 0;
}
985 
bfin_ptp_gettime(struct ptp_clock_info * ptp,struct timespec64 * ts)986 static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
987 {
988 	u64 ns;
989 	unsigned long flags;
990 	struct bfin_mac_local *lp =
991 		container_of(ptp, struct bfin_mac_local, caps);
992 
993 	spin_lock_irqsave(&lp->phc_lock, flags);
994 
995 	ns = bfin_ptp_time_read(lp);
996 
997 	spin_unlock_irqrestore(&lp->phc_lock, flags);
998 
999 	*ts = ns_to_timespec64(ns);
1000 
1001 	return 0;
1002 }
1003 
bfin_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)1004 static int bfin_ptp_settime(struct ptp_clock_info *ptp,
1005 			   const struct timespec64 *ts)
1006 {
1007 	u64 ns;
1008 	unsigned long flags;
1009 	struct bfin_mac_local *lp =
1010 		container_of(ptp, struct bfin_mac_local, caps);
1011 
1012 	ns = timespec64_to_ns(ts);
1013 
1014 	spin_lock_irqsave(&lp->phc_lock, flags);
1015 
1016 	bfin_ptp_time_write(lp, ns);
1017 
1018 	spin_unlock_irqrestore(&lp->phc_lock, flags);
1019 
1020 	return 0;
1021 }
1022 
/* PHC .enable: no ancillary features (alarms, extts, perout) exist. */
static int bfin_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
1028 
/* Template PHC capabilities; max_adj is filled in per-board by
 * bfin_phc_init() from the computed lp->max_ppb. */
static struct ptp_clock_info bfin_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "BF518 clock",
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= bfin_ptp_adjfreq,
	.adjtime	= bfin_ptp_adjtime,
	.gettime64	= bfin_ptp_gettime,
	.settime64	= bfin_ptp_settime,
	.enable		= bfin_ptp_enable,
};
1044 
/*
 * bfin_phc_init - register the PTP hardware clock with the PHC core
 * @netdev: device owning the clock
 * @dev: parent struct device for the clock registration
 *
 * Copies the capability template, fills in the board-specific max_adj,
 * registers the clock and records its index. Returns 0 or the
 * ptp_clock_register() error.
 */
static int bfin_phc_init(struct net_device *netdev, struct device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	lp->caps = bfin_ptp_caps;
	lp->caps.max_adj = lp->max_ppb;
	lp->clock = ptp_clock_register(&lp->caps, dev);
	if (IS_ERR(lp->clock))
		return PTR_ERR(lp->clock);

	lp->phc_index = ptp_clock_index(lp->clock);
	spin_lock_init(&lp->phc_lock);

	return 0;
}
1060 
/* Unregister the PTP hardware clock created by bfin_phc_init(). */
static void bfin_phc_release(struct bfin_mac_local *lp)
{
	ptp_clock_unregister(lp->clock);
}
1065 
#else
/* Hardware timestamping support not compiled in: provide stubs so the
 * rest of the driver needs no #ifdefs.  set/get report -EOPNOTSUPP,
 * phc_init reports success, and the remaining hooks expand to nothing.
 */
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
# define bfin_phc_release(lp)
#endif
1076 
/*
 * Release completed TX descriptors starting at tx_list_head: disable
 * the descriptor's DMA, clear its status word and free the attached
 * skb, advancing the head each time.  Stops at the first descriptor
 * whose status word is still zero (not yet completed).
 *
 * NOTE(review): the do-while processes the current head unconditionally,
 * so callers must ensure tx_list_head->status.status_word != 0 (or that
 * the DMA engine is stopped) before calling.
 */
static inline void _tx_reclaim_skb(void)
{
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_consume_skb_any(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;

	} while (tx_list_head->status.status_word != 0);
}
1090 
/*
 * Reclaim finished TX buffers and manage the queue and reclaim timer.
 *
 * Called from the xmit path, from the reclaim timer and from the
 * netpoll controller.  When the ring is full (the descriptor after
 * current_tx_ptr wraps onto tx_list_head), briefly poll for the DMA
 * engine to complete a descriptor; if it does not within the timeout,
 * stop the queue and let the (shortened) reclaim timer retry.
 */
static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	/* free whatever has already completed */
	if (tx_list_head->status.status_word != 0)
		_tx_reclaim_skb();

	/* ring full: next slot to use collides with the reclaim head */
	if (current_tx_ptr->next == tx_list_head) {
		while (tx_list_head->status.status_word == 0) {
			/* slow down polling to avoid too many queue stop. */
			udelay(10);
			/* reclaim skb if DMA is not running. */
			if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
				break;
			if (timeout_cnt-- < 0)
				break;
		}

		/* timed out with DMA still running: give up for now and
		 * stop the queue; otherwise reclaim the freed slot(s)
		 */
		if (timeout_cnt >= 0)
			_tx_reclaim_skb();
		else
			netif_stop_queue(lp->ndev);
	}

	if (current_tx_ptr->next != tx_list_head &&
		netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	/* re-arm the timer while packets remain outstanding */
	if (tx_list_head != current_tx_ptr) {
		/* shorten the timer interval if tx queue is stopped */
		if (netif_queue_stopped(lp->ndev))
			lp->tx_reclaim_timer.expires =
				jiffies + (TX_RECLAIM_JIFFIES >> 4);
		else
			lp->tx_reclaim_timer.expires =
				jiffies + TX_RECLAIM_JIFFIES;

		mod_timer(&lp->tx_reclaim_timer,
			lp->tx_reclaim_timer.expires);
	}

	return;
}
1134 
/* Timer callback: the timer's data word carries the device's private
 * struct; recover it and run the normal TX reclaim path.
 */
static void tx_reclaim_skb_timeout(unsigned long lp)
{
	struct bfin_mac_local *priv = (struct bfin_mac_local *)lp;

	tx_reclaim_skb(priv);
}
1139 
/*
 * Queue one skb for transmission on the current TX descriptor.
 *
 * The hardware wants a 16-bit DMA_Length_Word immediately before the
 * frame data.  If skb->data is 2-byte (but not 4-byte) aligned, the
 * word is written into the two bytes in front of the payload and the
 * skb buffer is DMA'd in place; otherwise the frame is copied into the
 * descriptor's own packet buffer behind the length word.
 *
 * Always returns NETDEV_TX_OK; ring-full back-pressure is handled by
 * tx_reclaim_skb() stopping the queue.
 */
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	u16 *data;
	u32 data_align = (unsigned long)(skb->data) & 0x3;

	current_tx_ptr->skb = skb;

	if (data_align == 0x2) {
		/* move skb->data to current_tx_ptr payload */
		data = (u16 *)(skb->data) - 1;
		*data = (u16)(skb->len);
		/*
		 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
		 * a DMA_Length_Word field associated with the packet. The lower 12 bits
		 * of this field are the length of the packet payload in bytes and the higher
		 * 4 bits are the timestamping enable field.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*data |= 0x1000;

		current_tx_ptr->desc_a.start_addr = (u32)data;
		/* flush dcache so the DMA engine sees the length word and
		 * payload — this is important!
		 */
		blackfin_dcache_flush_range((u32)data,
				(u32)((u8 *)data + skb->len + 4));
	} else {
		/* unaligned: stage the frame in the descriptor's buffer */
		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
		/* enable timestamping for the sent packet */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
			skb->len);
		current_tx_ptr->desc_a.start_addr =
			(u32)current_tx_ptr->packet;
		blackfin_dcache_flush_range(
			(u32)current_tx_ptr->packet,
			(u32)(current_tx_ptr->packet + skb->len + 2));
	}

	/* make sure the internal data buffers in the core are drained
	 * so that the DMA descriptors are completely written when the
	 * DMA engine goes to fetch them below
	 */
	SSYNC();

	/* always clear status buffer before start tx dma */
	current_tx_ptr->status.status_word = 0;

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	bfin_tx_hwtstamp(dev, skb);

	current_tx_ptr = current_tx_ptr->next;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);

	tx_reclaim_skb(lp);

	return NETDEV_TX_OK;
}
1214 
#define IP_HEADER_OFF  0
/* Any of these status bits marks the received frame as bad. */
#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
	RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)

/*
 * Deliver the frame in the current RX descriptor to the stack and
 * re-arm the descriptor with a freshly allocated skb.  On any error
 * (bad frame status or allocation failure) the frame is dropped and
 * the descriptor is simply recycled.
 */
static void bfin_mac_rx(struct bfin_mac_local *lp)
{
	struct net_device *dev = lp->ndev;
	struct sk_buff *skb, *new_skb;
	unsigned short len;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if frame status word reports an error condition,
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		netdev_notice(dev, "rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;

	new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache of skb->data range when it is write back
	 * cache. It will prevent overwriting the new data from DMA
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	/* hand the new buffer to the descriptor; start 2 bytes early so
	 * the hardware's RXDWA padding lands before the IP-aligned data
	 */
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN);
	/* Deduce Ethernet FCS length from Ethernet payload length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard IP header
	 * length of 20 bytes, because the blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if our
	 * IP version or header break that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduce Ethernet FCS from hardware generated IP payload checksum.
		 * IP checksum is based on 16-bit one's complement algorithm.
		 * To deduce a value from checksum is equal to add its inversion.
		 * If the IP payload len is odd, the inversed FCS should also
		 * begin from odd address and leave first byte zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	napi_gro_receive(&lp->napi, skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	/* recycle the descriptor and advance to the next one */
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}
1303 
bfin_mac_poll(struct napi_struct * napi,int budget)1304 static int bfin_mac_poll(struct napi_struct *napi, int budget)
1305 {
1306 	int i = 0;
1307 	struct bfin_mac_local *lp = container_of(napi,
1308 						 struct bfin_mac_local,
1309 						 napi);
1310 
1311 	while (current_rx_ptr->status.status_word != 0 && i < budget) {
1312 		bfin_mac_rx(lp);
1313 		i++;
1314 	}
1315 
1316 	if (i < budget) {
1317 		napi_complete(napi);
1318 		if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
1319 			enable_irq(IRQ_MAC_RX);
1320 	}
1321 
1322 	return i;
1323 }
1324 
/* interrupt routine to handle rx and error signal */
static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
{
	struct bfin_mac_local *lp = netdev_priv(dev_id);
	u32 status;

	status = bfin_read_DMA1_IRQ_STATUS();

	/* acknowledge the RX DMA completion/error status */
	bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR);
	if (status & DMA_DONE) {
		/* mask further RX interrupts and hand the ring to NAPI;
		 * bfin_mac_poll() re-enables the IRQ when it finishes
		 */
		disable_irq_nosync(IRQ_MAC_RX);
		set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
		napi_schedule(&lp->napi);
	}

	return IRQ_HANDLED;
}
1342 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (netconsole/kgdboe): emulate the RX interrupt and
 * reclaim TX buffers with interrupts effectively disabled.
 */
static void bfin_mac_poll_controller(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
}
#endif				/* CONFIG_NET_POLL_CONTROLLER */
1352 
bfin_mac_disable(void)1353 static void bfin_mac_disable(void)
1354 {
1355 	unsigned int opmode;
1356 
1357 	opmode = bfin_read_EMAC_OPMODE();
1358 	opmode &= (~RE);
1359 	opmode &= (~TE);
1360 	/* Turn off the EMAC */
1361 	bfin_write_EMAC_OPMODE(opmode);
1362 }
1363 
/*
 * Enable Interrupts, Receive, and Transmit
 *
 * Arms the RX DMA channel with the descriptor ring, waits for any
 * in-flight MDIO transaction, then enables the EMAC receiver.
 * Returns 0 on success or the error from bfin_mdio_poll().
 */
static int bfin_mac_enable(struct phy_device *phydev)
{
	int ret;
	u32 opmode;

	pr_debug("%s\n", __func__);

	/* Set RX DMA */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait MII done */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;
	opmode |= RE;

	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
		opmode |= RMII; /* For Now only 100MBit are supported */
#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
		if (__SILICON_REVISION__ < 3) {
			/*
			 * This isn't publicly documented (fun times!), but in
			 * silicon <=0.2, the RX and TX pins are clocked together.
			 * So in order to recv, we must enable the transmit side
			 * as well.  This will cause a spurious TX interrupt too,
			 * but we can easily consume that.
			 */
			opmode |= TE;
		}
#endif
	}

	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);

	return 0;
}
1418 
/* Our watchdog timed out. Called by the networking layer.
 * Recovery strategy: disable the MAC, drop every packet still queued
 * in the TX ring, then re-enable and restart the queue.
 */
static void bfin_mac_timeout(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __func__);

	bfin_mac_disable();

	del_timer(&lp->tx_reclaim_timer);

	/* reset tx queue and free skb */
	while (tx_list_head != current_tx_ptr) {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			/* dropped, not transmitted — hence kfree, not consume */
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	}

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	bfin_mac_enable(lp->phydev);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
}
1449 
bfin_mac_multicast_hash(struct net_device * dev)1450 static void bfin_mac_multicast_hash(struct net_device *dev)
1451 {
1452 	u32 emac_hashhi, emac_hashlo;
1453 	struct netdev_hw_addr *ha;
1454 	u32 crc;
1455 
1456 	emac_hashhi = emac_hashlo = 0;
1457 
1458 	netdev_for_each_mc_addr(ha, dev) {
1459 		crc = ether_crc(ETH_ALEN, ha->addr);
1460 		crc >>= 26;
1461 
1462 		if (crc & 0x20)
1463 			emac_hashhi |= 1 << (crc & 0x1f);
1464 		else
1465 			emac_hashlo |= 1 << (crc & 0x1f);
1466 	}
1467 
1468 	bfin_write_EMAC_HASHHI(emac_hashhi);
1469 	bfin_write_EMAC_HASHLO(emac_hashlo);
1470 }
1471 
1472 /*
1473  * This routine will, depending on the values passed to it,
1474  * either make it accept multicast packets, go into
1475  * promiscuous mode (for TCPDUMP and cousins) or accept
1476  * a select set of multicast packets
1477  */
bfin_mac_set_multicast_list(struct net_device * dev)1478 static void bfin_mac_set_multicast_list(struct net_device *dev)
1479 {
1480 	u32 sysctl;
1481 
1482 	if (dev->flags & IFF_PROMISC) {
1483 		netdev_info(dev, "set promisc mode\n");
1484 		sysctl = bfin_read_EMAC_OPMODE();
1485 		sysctl |= PR;
1486 		bfin_write_EMAC_OPMODE(sysctl);
1487 	} else if (dev->flags & IFF_ALLMULTI) {
1488 		/* accept all multicast */
1489 		sysctl = bfin_read_EMAC_OPMODE();
1490 		sysctl |= PAM;
1491 		bfin_write_EMAC_OPMODE(sysctl);
1492 	} else if (!netdev_mc_empty(dev)) {
1493 		/* set up multicast hash table */
1494 		sysctl = bfin_read_EMAC_OPMODE();
1495 		sysctl |= HM;
1496 		bfin_write_EMAC_OPMODE(sysctl);
1497 		bfin_mac_multicast_hash(dev);
1498 	} else {
1499 		/* clear promisc or multicast mode */
1500 		sysctl = bfin_read_EMAC_OPMODE();
1501 		sysctl &= ~(RAF | PAM);
1502 		bfin_write_EMAC_OPMODE(sysctl);
1503 	}
1504 }
1505 
bfin_mac_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)1506 static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1507 {
1508 	struct bfin_mac_local *lp = netdev_priv(netdev);
1509 
1510 	if (!netif_running(netdev))
1511 		return -EINVAL;
1512 
1513 	switch (cmd) {
1514 	case SIOCSHWTSTAMP:
1515 		return bfin_mac_hwtstamp_set(netdev, ifr);
1516 	case SIOCGHWTSTAMP:
1517 		return bfin_mac_hwtstamp_get(netdev, ifr);
1518 	default:
1519 		if (lp->phydev)
1520 			return phy_mii_ioctl(lp->phydev, ifr, cmd);
1521 		else
1522 			return -EOPNOTSUPP;
1523 	}
1524 }
1525 
/*
 * this puts the device in an inactive state
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	/* Turn off the EMAC TX DMA */
	bfin_write_DMA2_CONFIG(0x0000);
}
1537 
1538 /*
1539  * Open and Initialize the interface
1540  *
1541  * Set up everything, reset the card, etc..
1542  */
bfin_mac_open(struct net_device * dev)1543 static int bfin_mac_open(struct net_device *dev)
1544 {
1545 	struct bfin_mac_local *lp = netdev_priv(dev);
1546 	int ret;
1547 	pr_debug("%s: %s\n", dev->name, __func__);
1548 
1549 	/*
1550 	 * Check that the address is valid.  If its not, refuse
1551 	 * to bring the device up.  The user must specify an
1552 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1553 	 */
1554 	if (!is_valid_ether_addr(dev->dev_addr)) {
1555 		netdev_warn(dev, "no valid ethernet hw addr\n");
1556 		return -EINVAL;
1557 	}
1558 
1559 	/* initial rx and tx list */
1560 	ret = desc_list_init(dev);
1561 	if (ret)
1562 		return ret;
1563 
1564 	phy_start(lp->phydev);
1565 	setup_system_regs(dev);
1566 	setup_mac_addr(dev->dev_addr);
1567 
1568 	bfin_mac_disable();
1569 	ret = bfin_mac_enable(lp->phydev);
1570 	if (ret)
1571 		return ret;
1572 	pr_debug("hardware init finished\n");
1573 
1574 	napi_enable(&lp->napi);
1575 	netif_start_queue(dev);
1576 	netif_carrier_on(dev);
1577 
1578 	return 0;
1579 }
1580 
/*
 * this makes the board clean up everything that it can
 * and not talk to the outside world.   Caused by
 * an 'ifconfig ethX down'
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);
	netif_carrier_off(dev);

	/* stop the PHY state machine and power the PHY down */
	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}
1606 
/* net_device callbacks for the Blackfin on-chip MAC. */
static const struct net_device_ops bfin_mac_netdev_ops = {
	.ndo_open		= bfin_mac_open,
	.ndo_stop		= bfin_mac_close,
	.ndo_start_xmit		= bfin_mac_hard_start_xmit,
	.ndo_set_mac_address	= bfin_mac_set_mac_address,
	.ndo_tx_timeout		= bfin_mac_timeout,
	.ndo_set_rx_mode	= bfin_mac_set_multicast_list,
	.ndo_do_ioctl           = bfin_mac_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bfin_mac_poll_controller,
#endif
};
1621 
/*
 * Probe the on-chip MAC: allocate the netdev, recover or generate a
 * MAC address, bind to the separately-probed bfin_mii_bus, attach the
 * PHY, register the netdev and the PTP hardware clock.
 */
static int bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct bfin_mac_local *lp;
	struct platform_device *pd;
	struct bfin_mii_bus_platform_data *mii_bus_data;
	int rc;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* sanity-check the MAC block by a write/read-back of ADDRLO
	 * (there is no revision register to probe instead)
	 */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}


	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		if (bfin_get_ether_addr(ndev->dev_addr) ||
		     !is_valid_ether_addr(ndev->dev_addr)) {
			/* Still not valid, get a random one */
			netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
			eth_hw_addr_random(ndev);
		}
	}

	setup_mac_addr(ndev->dev_addr);

	/* the platform data points at the bfin_mii_bus platform device */
	if (!dev_get_platdata(&pdev->dev)) {
		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	pd = dev_get_platdata(&pdev->dev);
	lp->mii_bus = platform_get_drvdata(pd);
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	lp->mii_bus->priv = ndev;
	mii_bus_data = dev_get_platdata(&pd->dev);

	rc = mii_probe(ndev, mii_bus_data->phy_mode);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;

	lp->flags = 0;
	netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			0, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	bfin_mac_hwtstamp_init(ndev);
	rc = bfin_phc_init(ndev, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register PHC device!\n");
		goto out_err_phc;
	}

	/* now, print out the card info, in a short format.. */
	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

	/* unwind in reverse order of acquisition */
out_err_phc:
out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
	netif_napi_del(&lp->napi);
out_err_mii_probe:
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
out_err_probe_mac:
	free_netdev(ndev);

	return rc;
}
1746 
/* Tear down everything bfin_mac_probe() set up, in reverse order. */
static int bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(ndev);

	bfin_phc_release(lp);

	/* drop our back-pointer; the mii_bus itself is owned and freed
	 * by the bfin_mii_bus platform driver
	 */
	lp->mii_bus->priv = NULL;

	unregister_netdev(ndev);

	netif_napi_del(&lp->napi);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	return 0;
}
1766 
1767 #ifdef CONFIG_PM
/* Suspend: if wake-on-LAN is enabled, keep only the receiver alive and
 * arm magic-packet wakeup; otherwise shut the interface down entirely.
 */
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		/* RX stays on for wakeup detection, TX is disabled */
		bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
		bfin_write_EMAC_WKUP_CTL(MPKE);
		enable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_close(net_dev);
	}

	return 0;
}
1784 
/* Resume: undo whatever bfin_mac_suspend() did — disarm wake-on-LAN
 * and re-enable TX, or reopen the interface.
 */
static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
		bfin_write_EMAC_WKUP_CTL(0);
		disable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_open(net_dev);
	}

	return 0;
}
1801 #else
1802 #define bfin_mac_suspend NULL
1803 #define bfin_mac_resume NULL
1804 #endif	/* CONFIG_PM */
1805 
bfin_mii_bus_probe(struct platform_device * pdev)1806 static int bfin_mii_bus_probe(struct platform_device *pdev)
1807 {
1808 	struct mii_bus *miibus;
1809 	struct bfin_mii_bus_platform_data *mii_bus_pd;
1810 	const unsigned short *pin_req;
1811 	int rc, i;
1812 
1813 	mii_bus_pd = dev_get_platdata(&pdev->dev);
1814 	if (!mii_bus_pd) {
1815 		dev_err(&pdev->dev, "No peripherals in platform data!\n");
1816 		return -EINVAL;
1817 	}
1818 
1819 	/*
1820 	 * We are setting up a network card,
1821 	 * so set the GPIO pins to Ethernet mode
1822 	 */
1823 	pin_req = mii_bus_pd->mac_peripherals;
1824 	rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1825 	if (rc) {
1826 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1827 		return rc;
1828 	}
1829 
1830 	rc = -ENOMEM;
1831 	miibus = mdiobus_alloc();
1832 	if (miibus == NULL)
1833 		goto out_err_alloc;
1834 	miibus->read = bfin_mdiobus_read;
1835 	miibus->write = bfin_mdiobus_write;
1836 
1837 	miibus->parent = &pdev->dev;
1838 	miibus->name = "bfin_mii_bus";
1839 	miibus->phy_mask = mii_bus_pd->phy_mask;
1840 
1841 	snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
1842 		pdev->name, pdev->id);
1843 	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1844 	if (!miibus->irq)
1845 		goto out_err_irq_alloc;
1846 
1847 	for (i = rc; i < PHY_MAX_ADDR; ++i)
1848 		miibus->irq[i] = PHY_POLL;
1849 
1850 	rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
1851 	if (rc != mii_bus_pd->phydev_number)
1852 		dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
1853 			mii_bus_pd->phydev_number);
1854 	for (i = 0; i < rc; ++i) {
1855 		unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
1856 		if (phyaddr < PHY_MAX_ADDR)
1857 			miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
1858 		else
1859 			dev_err(&pdev->dev,
1860 				"Invalid PHY address %i for phydev %i\n",
1861 				phyaddr, i);
1862 	}
1863 
1864 	rc = mdiobus_register(miibus);
1865 	if (rc) {
1866 		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1867 		goto out_err_mdiobus_register;
1868 	}
1869 
1870 	platform_set_drvdata(pdev, miibus);
1871 	return 0;
1872 
1873 out_err_mdiobus_register:
1874 	kfree(miibus->irq);
1875 out_err_irq_alloc:
1876 	mdiobus_free(miibus);
1877 out_err_alloc:
1878 	peripheral_free_list(pin_req);
1879 
1880 	return rc;
1881 }
1882 
/* Release everything bfin_mii_bus_probe() acquired, in reverse order:
 * MDIO bus registration, the irq array, the bus itself, and finally
 * the claimed MAC pins.
 */
static int bfin_mii_bus_remove(struct platform_device *pdev)
{
	struct mii_bus *miibus = platform_get_drvdata(pdev);
	struct bfin_mii_bus_platform_data *mii_bus_pd =
		dev_get_platdata(&pdev->dev);

	mdiobus_unregister(miibus);
	kfree(miibus->irq);
	mdiobus_free(miibus);
	peripheral_free_list(mii_bus_pd->mac_peripherals);

	return 0;
}
1896 
/* Platform driver for the shared MDIO bus; must be registered before
 * the MAC driver, whose probe looks the bus up via platform data.
 */
static struct platform_driver bfin_mii_bus_driver = {
	.probe = bfin_mii_bus_probe,
	.remove = bfin_mii_bus_remove,
	.driver = {
		.name = "bfin_mii_bus",
	},
};
1904 
/* Platform driver for the MAC itself (legacy suspend/resume hooks). */
static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = bfin_mac_remove,
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};
1914 
bfin_mac_init(void)1915 static int __init bfin_mac_init(void)
1916 {
1917 	int ret;
1918 	ret = platform_driver_register(&bfin_mii_bus_driver);
1919 	if (!ret)
1920 		return platform_driver_register(&bfin_mac_driver);
1921 	return -ENODEV;
1922 }
1923 
1924 module_init(bfin_mac_init);
1925 
/* Module exit: unregister the drivers in reverse registration order. */
static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
	platform_driver_unregister(&bfin_mii_bus_driver);
}
1931 
1932 module_exit(bfin_mac_cleanup);
1933 
1934