/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

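/* Pre-fill every 16-byte buffer pool descriptor with its slot index
 * (returned later in USERINFO so the skb can be looked up), the
 * destination free-pool queue number and a cache-stashing hint.
 */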
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

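/* Allocate nbuf fresh receive skbs, DMA-map them and fill in their
 * free-pool descriptors; a single write of nbuf to the ring command
 * register then makes them all visible to hardware.
 */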
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	iowrite32(nbuf, buf_pool->cmd);
	buf_pool->tail = tail;

	return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

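/* Return the number of messages pending in a ring, taken from the
 * NUMMSGSINQ field of the ring state (second word of the command area).
 */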
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

	return num_msgs >> NUMMSGSINQ_POS;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = xgene_enet_ring_len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	iowrite32(-len, buf_pool->cmd);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

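/* Reclaim one transmitted skb: unmap its buffer, report any LERR
 * status raised by the hardware, then free the skb recorded at
 * enqueue time under the USERINFO index.
 */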
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

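/* Compose the 64-bit work message for a Tx descriptor: the Ethernet,
 * IP and L4 header lengths plus the flags requesting TCP/UDP checksum
 * insertion. Offload is only requested for unfragmented IPv4 TCP/UDP
 * frames; everything else goes out with checksum insertion disabled.
 */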
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

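/* Fill a Tx descriptor for a linear skb: map the data, record the DMA
 * address, length and work message, and remember the skb in the
 * completion ring so xgene_enet_tx_completion() can free it later.
 */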
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = xgene_enet_ring_len(tx_ring);
	cq_level = xgene_enet_ring_len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	iowrite32(1, tx_ring->cmd);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

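/* Hand one received frame to the stack: unmap the buffer, look up the
 * skb via USERINFO, drop it on a hardware error code, strip the 4-byte
 * CRC that hardware leaves in place, then GRO-receive it. The buffer
 * pool is topped up once every NUM_BUFPOOL processed frames.
 */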
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

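/* Service up to budget descriptors from a completion ring. Rx and Tx
 * completions may share a ring; a non-zero FPQNUM marks a descriptor
 * as Rx. The consumed count is returned to hardware as a negative
 * write to the ring command register.
 */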
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		iowrite32(-count, ring->cmd);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

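/* NAPI poll handler. The ring interrupt was disabled in the hard IRQ
 * handler and is only re-enabled once the ring has been drained below
 * the budget.
 */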
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	xgene_enet_clear_ring(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);

	if (ring->desc_addr) {
		xgene_enet_clear_ring(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = xgene_enet_setup_ring(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

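/* A ring id encodes the owner in the upper bits and the buffer number
 * in the low six bits.
 */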
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

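/* Create the Rx ring with its 2KB-buffer free pool and the Tx ring.
 * With a dedicated completion queue (cq_cnt != 0) a separate Tx
 * completion ring is created; otherwise Tx completions are delivered
 * on the Rx ring.
 */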
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors = stats->rx_length_errors +
			   stats->rx_crc_errors +
			   stats->rx_frame_errors +
			   stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = device_property_read_u32(dev, "port-id", &id);
	if (!ret && id > 1) {
		dev_err(dev, "Incorrect port-id specified\n");
		return -ENODEV;
	}

	pdata->port_id = id;

	return 0;
}

static int xgene_get_mac_address(struct device *dev,
				 unsigned char *addr)
{
	int ret;

	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
	if (ret)
		ret = device_property_read_u8_array(dev, "mac-address",
						    addr, 6);
	if (ret)
		return -ENODEV;

	return ETH_ALEN;
}

static int xgene_get_phy_mode(struct device *dev)
{
	int i, ret;
	char *modestr;

	ret = device_property_read_string(dev, "phy-connection-type",
					  (const char **)&modestr);
	if (ret)
		ret = device_property_read_string(dev, "phy-mode",
						  (const char **)&modestr);
	if (ret)
		return -ENODEV;

	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
		if (!strcasecmp(modestr, phy_modes(i)))
			return i;
	}
	return -ENODEV;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	ret = xgene_get_port_id(dev, pdata);
	if (ret)
		return ret;

	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = xgene_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->txc_irq = ret;
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		pdata->clk = NULL;
	}

	base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

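/* Bring up the datapath: reset the port, create the rings, fill the
 * Rx buffer pool, configure the classifier (CLE) bypass to steer
 * traffic to the Rx ring and initialize the MAC.
 */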
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

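/* Select MAC and port ops plus the ring manager by PHY mode, and pick
 * the per-port starting buffer and ring numbers.
 */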
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
		break;
	}

	switch (pdata->port_id) {
	case 0:
		pdata->cpu_bufnum = START_CPU_BUFNUM_0;
		pdata->eth_bufnum = START_ETH_BUFNUM_0;
		pdata->bp_bufnum = START_BP_BUFNUM_0;
		pdata->ring_num = START_RING_NUM_0;
		break;
	case 1:
		pdata->cpu_bufnum = START_CPU_BUFNUM_1;
		pdata->eth_bufnum = START_ETH_BUFNUM_1;
		pdata->bp_bufnum = START_BP_BUFNUM_1;
		pdata->ring_num = START_RING_NUM_1;
		break;
	default:
		break;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err_free_netdev;

	xgene_enet_setup_ops(pdata);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_free_netdev;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", },
	{ "APMC0D30", },
	{ "APMC0D31", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",},
	{.compatible = "apm,xgene1-sgenet",},
	{.compatible = "apm,xgene1-xgenet",},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");