/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#define PPE_HIS_RX_PKT_CNT		0x804

/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#define TX_FINISH_CACHE_INV		BIT(2)
#define TX_CLEAR_WB			BIT(4)
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11

#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

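/* Hardware descriptors. All fields are big-endian as seen by the PPE
 * (converted with cpu_to_be32()/be16_to_cpu() in the fast path); TX
 * descriptors are 64-byte aligned for the completion write-back.
 */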
struct tx_desc {
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
} __aligned(64);

struct rx_desc {
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
};

struct hip04_priv {
	void __iomem *base;
	int phy_mode;
	int chan;
	unsigned int port;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

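/* Number of TX descriptors currently queued between the producer (tx_head)
 * and the consumer (tx_tail) indices.
 */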
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "unsupported phy mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

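/* Reset the PPE state for this port: keep reading buffer addresses back
 * (PPE_CFG_RX_ADDR) until the current buffer count (PPE_CURR_BUF_CNT)
 * drops to zero or RESET_TIMEOUT iterations have passed.
 */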
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

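/* One-time PPE/GMAC setup: buffer pool group, RX buffer and FIFO sizes,
 * packet alignment, bus attributes, frame length limits, CRC stripping,
 * padding and auto-negotiation defaults.
 */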
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->port);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
}

static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

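/* Reclaim TX descriptors the hardware has completed (write-back cleared
 * send_addr), unmap and free their skbs and update BQL.  With @force,
 * reclaim everything regardless of completion state (interface stop path).
 * Returns the number of descriptors still outstanding.
 */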
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}

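/* Map the skb, fill in the next TX descriptor (big-endian, with write-back
 * enabled) and kick the PPE.  TX completion is coalesced: NAPI is scheduled
 * once tx_coalesce_frames descriptors are pending, otherwise the hrtimer
 * takes care of a later cleanup.
 */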
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}

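/* NAPI poll: receive up to @budget packets, replacing each consumed buffer
 * with a freshly allocated and mapped page fragment, then re-enable the RX
 * interrupt once all pending packets have been handled.  Completed TX
 * descriptors are reclaimed here as well, restarting the coalesce timer if
 * work is still outstanding.
 */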
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb))
			net_dbg_ratelimited("build_skb failed\n");

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (0 == len) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete(napi);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}

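/* Top half: acknowledge the interrupt, account error/drop events, and hand
 * RX processing over to NAPI with the RX interrupt masked.
 */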
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

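/* TX coalesce timer expired: schedule NAPI so hip04_rx_poll() reclaims the
 * pending TX descriptors.
 */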
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

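/* ndo_open: map and hand all RX buffers to the PPE, then start the PHY,
 * the MAC and NAPI.
 */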
static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check for unsupported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_get_stats		= hip04_get_stats,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout         = hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

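/* Allocate the coherent TX descriptor ring and the initial set of RX page
 * fragment buffers; the RX buffers are DMA-mapped later in ndo_open.
 */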
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			put_page(virt_to_head_page(priv->rx_buf[i]));

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	struct resource *res;
	unsigned int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(d, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		dev_warn(d, "phy-mode not found\n");
		ret = -EINVAL;
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ether_setup(ndev);
	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	random_ether_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "ring allocation failed\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	unregister_netdev(ndev);
	hip04_free_ring(ndev, d);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");