/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>

#include "cpsw_ale.h"
#include "netcp.h"

#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION		"v1.0"

#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
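/* The macros above decode the subsystem ID_VER register: bits 31-16 hold the
 * identification field, bits 15-11 the RTL version, bits 10-8 the major and
 * bits 7-0 the minor version.  For example GBE_SS_VERSION_14 (0x4ed21104)
 * decodes to ident 0x4ed2, RTL 2, version 1.4.
 */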

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME			"netcp-gbe"
#define GBE_SS_VERSION_14		0x4ed21104

#define GBE_SS_REG_INDEX		0
#define GBE_SGMII34_REG_INDEX		1
#define GBE_SM_REG_INDEX		2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET		0x34
#define GBE13_SLAVE_PORT_OFFSET		0x60
#define GBE13_EMAC_OFFSET		0x100
#define GBE13_SLAVE_PORT2_OFFSET	0x200
#define GBE13_HW_STATS_OFFSET		0x300
#define GBE13_ALE_OFFSET		0x600
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_ALE_ENTRIES		1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME		"netcp-gbenu"
#define GBE_SS_ID_NU			0x4ee6
#define GBE_SS_ID_2U			0x4ee8

#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
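/* The NU and 2U flavours of the subsystem are told apart from the other
 * variants by the identification field alone: IS_SS_ID_MU() matches either
 * of them, while IS_SS_ID_NU() matches only the NU subsystem.
 */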

#define GBENU_SS_REG_INDEX		0
#define GBENU_SM_REG_INDEX		1
#define GBENU_SGMII_MODULE_OFFSET	0x100
#define GBENU_HOST_PORT_OFFSET		0x1000
#define GBENU_SLAVE_PORT_OFFSET		0x2000
#define GBENU_EMAC_OFFSET		0x2330
#define GBENU_HW_STATS_OFFSET		0x1a000
#define GBENU_ALE_OFFSET		0x1e000
#define GBENU_HOST_PORT_NUM		0
#define GBENU_NUM_ALE_ENTRIES		1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME		"netcp-xgbe"
#define XGBE_SS_VERSION_10		0x4ee42100

#define XGBE_SS_REG_INDEX		0
#define XGBE_SM_REG_INDEX		1
#define XGBE_SERDES_REG_INDEX		2

/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET		0x34
#define XGBE10_SLAVE_PORT_OFFSET	0x64
#define XGBE10_EMAC_OFFSET		0x400
#define XGBE10_ALE_OFFSET		0x700
#define XGBE10_HW_STATS_OFFSET		0x800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_ALE_ENTRIES		1024

#define	GBE_TIMER_INTERVAL			(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK				BIT(0)
#define SOFT_RESET				BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT		100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2

#define MACSL_RX_ENABLE_CSF			BIT(23)
#define MACSL_ENABLE_EXT_CTL			BIT(18)
#define MACSL_XGMII_ENABLE			BIT(13)
#define MACSL_XGIG_MODE				BIT(8)
#define MACSL_GIG_MODE				BIT(7)
#define MACSL_GMII_ENABLE			BIT(5)
#define MACSL_FULLDUPLEX			BIT(0)

#define GBE_CTL_P0_ENABLE			BIT(2)
#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
#define GBE_STATS_CD_SEL			BIT(28)

#define GBE_PORT_MASK(x)			(BIT(x) - 1)
#define GBE_MASK_NO_PORTS			0
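/* GBE_PORT_MASK() builds a mask with one bit per port: GBE_PORT_MASK(4) is
 * BIT(4) - 1 == 0xf, i.e. ports 0-3, while GBE_MASK_NO_PORTS selects none.
 */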

#define GBE_DEF_1G_MAC_CONTROL					\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL				\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE			0
#define GBE_STATSB_MODULE			1
#define GBE_STATSC_MODULE			2
#define GBE_STATSD_MODULE			3

#define GBENU_STATS0_MODULE			0
#define GBENU_STATS1_MODULE			1
#define GBENU_STATS2_MODULE			2
#define GBENU_STATS3_MODULE			3
#define GBENU_STATS4_MODULE			4
#define GBENU_STATS5_MODULE			5
#define GBENU_STATS6_MODULE			6
#define GBENU_STATS7_MODULE			7
#define GBENU_STATS8_MODULE			8

#define XGBE_STATS0_MODULE			0
#define XGBE_STATS1_MODULE			1
#define XGBE_STATS2_MODULE			2

/* s: 0-based slave_port */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
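/* On the GBE 1.4 subsystem the SGMII registers for slave ports 0 and 1 sit
 * in one module while the higher-numbered ports use the separate block
 * mapped from GBE_SGMII34_REG_INDEX, so SGMII_BASE() picks the window from
 * the 0-based slave number.
 */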

#define GBE_TX_QUEUE				648
#define	GBE_TXHOOK_ORDER			0
#define GBE_DEFAULT_ALE_AGEOUT			30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID		-1

#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
#define HOST_TX_PRI_MAP_DEFAULT			0x00000000

struct xgbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
	u32	control;
};

struct xgbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	cppi_thresh;
};

struct xgbe_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
	u32	control;
};

struct xgbe_host_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	src_id;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct xgbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	em_control;
	u32	__reserved_1;
	u32	tx_gap;
	u32	rsvd[4];
};

struct xgbe_host_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	__rsvd_0[3];
	u32	rx_oversized_frames;
	u32	__rsvd_1;
	u32	rx_undersized_frames;
	u32	__rsvd_2;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	__rsvd_3[9];
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))

struct gbenu_ss_regs {
	u32	id_ver;
	u32	synce_count;		/* NU */
	u32	synce_mux;		/* NU */
	u32	control;		/* 2U */
	u32	__rsvd_0[2];		/* 2U */
	u32	rgmii_status;		/* 2U */
	u32	ss_status;		/* 2U */
};

struct gbenu_switch_regs {
	u32	id_ver;
	u32	control;
	u32	__rsvd_0[2];
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;			/* NU */
	u32	soft_idle;
	u32	thru_rate;		/* NU */
	u32	gap_thresh;		/* NU */
	u32	tx_start_wds;		/* NU */
	u32	eee_prescale;		/* 2U */
	u32	tx_g_oflow_thresh_set;	/* NU */
	u32	tx_g_oflow_thresh_clr;	/* NU */
	u32	tx_g_buf_thresh_set_l;	/* NU */
	u32	tx_g_buf_thresh_set_h;	/* NU */
	u32	tx_g_buf_thresh_clr_l;	/* NU */
	u32	tx_g_buf_thresh_clr_h;	/* NU */
};

struct gbenu_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	max_blks;		/* 2U */
	u32	mem_align1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;		/* 2U */
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_1;
	u32	idle2lpi;		/* 2U */
	u32	lpi2idle;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_2;
	u32	__rsvd_3[176];		/* NU: more to add */
	u32	__rsvd_4[2];
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbenu_host_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	flow_id_offset;		/* 2U */
	u32	__rsvd_1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_2;
	u32	idle2lpi;		/* 2U */
	u32	lpi2wake;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_3;
	u32	__rsvd_4[184];		/* NU */
	u32	host_blks_pri;		/* NU */
};

struct gbenu_emac_regs {
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	boff_test;
	u32	rx_pause;
	u32	__rsvd_0[11];		/* NU */
	u32	tx_pause;
	u32	__rsvd_1[11];		/* NU */
	u32	em_control;
	u32	tx_gap;
};

/* Some hw stat regs are applicable to slave ports only; this is handled by
 * the gbenu_et_stats table.  Some counters also exist only on the NU or
 * only on the 2U version of the subsystem.
 */
struct gbenu_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;		/* slave */
	u32	rx_crc_errors;
	u32	rx_align_code_errors;		/* slave */
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;		/* slave */
	u32	rx_undersized_frames;
	u32	rx_fragments;			/* slave */
	u32	ale_drop;
	u32	ale_overrun_drop;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;		/* slave */
	u32	tx_deferred_frames;		/* slave */
	u32	tx_collision_frames;		/* slave */
	u32	tx_single_coll_frames;		/* slave */
	u32	tx_mult_coll_frames;		/* slave */
	u32	tx_excessive_collisions;	/* slave */
	u32	tx_late_collisions;		/* slave */
	u32	rx_ipg_error;			/* slave 10G only */
	u32	tx_carrier_sense_errors;	/* slave */
	u32	tx_bytes;
	u32	tx_64B_frames;
	u32	tx_65_to_127B_frames;
	u32	tx_128_to_255B_frames;
	u32	tx_256_to_511B_frames;
	u32	tx_512_to_1023B_frames;
	u32	tx_1024B_frames;
	u32	net_bytes;
	u32	rx_bottom_fifo_drop;
	u32	rx_port_mask_drop;
	u32	rx_top_fifo_drop;
	u32	ale_rate_limit_drop;
	u32	ale_vid_ingress_drop;
	u32	ale_da_eq_sa_drop;
	u32	__rsvd_0[3];
	u32	ale_unknown_ucast;
	u32	ale_unknown_ucast_bytes;
	u32	ale_unknown_mcast;
	u32	ale_unknown_mcast_bytes;
	u32	ale_unknown_bcast;
	u32	ale_unknown_bcast_bytes;
	u32	ale_pol_match;
	u32	ale_pol_match_red;		/* NU */
	u32	ale_pol_match_yellow;		/* NU */
	u32	__rsvd_1[44];
	u32	tx_mem_protect_err;
	/* following NU only */
	u32	tx_pri0;
	u32	tx_pri1;
	u32	tx_pri2;
	u32	tx_pri3;
	u32	tx_pri4;
	u32	tx_pri5;
	u32	tx_pri6;
	u32	tx_pri7;
	u32	tx_pri0_bcnt;
	u32	tx_pri1_bcnt;
	u32	tx_pri2_bcnt;
	u32	tx_pri3_bcnt;
	u32	tx_pri4_bcnt;
	u32	tx_pri5_bcnt;
	u32	tx_pri6_bcnt;
	u32	tx_pri7_bcnt;
	u32	tx_pri0_drop;
	u32	tx_pri1_drop;
	u32	tx_pri2_drop;
	u32	tx_pri3_drop;
	u32	tx_pri4_drop;
	u32	tx_pri5_drop;
	u32	tx_pri6_drop;
	u32	tx_pri7_drop;
	u32	tx_pri0_drop_bcnt;
	u32	tx_pri1_drop_bcnt;
	u32	tx_pri2_drop_bcnt;
	u32	tx_pri3_drop_bcnt;
	u32	tx_pri4_drop_bcnt;
	u32	tx_pri5_drop_bcnt;
	u32	tx_pri6_drop_bcnt;
	u32	tx_pri7_drop_bcnt;
};

#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ	0x200

struct gbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
};

struct gbe_ss_regs_ofs {
	u16	id_ver;
	u16	control;
};

struct gbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
};

struct gbe_switch_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	soft_reset;
	u16	emcontrol;
	u16	stat_port_en;
	u16	ptype;
	u16	flow_control;
};

struct gbe_port_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	sa_lo;
	u16	sa_hi;
	u16	ts_ctl;
	u16	ts_seq_ltype;
	u16	ts_vlan;
	u16	ts_ctl_ltype2;
	u16	ts_ctl2;
	u16	rx_maxlen;	/* 2U, NU */
};

struct gbe_host_port_regs {
	u32	src_id;
	u32	port_vlan;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	rx_maxlen;
};

struct gbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
	u32	rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16	mac_control;
	u16	soft_reset;
	u16	rx_maxlen;
};

struct gbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	__pad_0[2];
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS			9
#define GBE_HW_STATS_REG_MAP_SZ			0x100

struct gbe_slave {
	void __iomem			*port_regs;
	void __iomem			*emac_regs;
	struct gbe_port_regs_ofs	port_regs_ofs;
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state;
	bool				open;
	struct phy_device		*phy;
	u32				link_interface;
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*phy_node;
	struct list_head		slave_list;
};

struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	u8				max_num_slaves;
	u8				max_num_ports; /* max_num_slaves + 1 */
	struct netcp_tx_pipe		tx_pipe;

	int				host_port;
	u32				rx_packet_max;
	u32				ss_version;
	u32				stats_en_mask;

	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;

	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;

	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;

	u64				*hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/* Lock for updating the hw stats */
	spinlock_t			hw_stats_lock;
};

struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};
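/* Each descriptor names a counter as it is shown by "ethtool -S", the hw
 * stats module it lives in and the size and offset of the field inside that
 * module's register block.  The tables below are built from these
 * descriptors with the *_INFO() and GBENU_STATS_*() macros.
 */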

#define GBE_STATSA_INFO(field)						\
{									\
	"GBE_A:"#field, GBE_STATSA_MODULE,				\
	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
	offsetof(struct gbe_hw_stats, field)				\
}

#define GBE_STATSB_INFO(field)						\
{									\
	"GBE_B:"#field, GBE_STATSB_MODULE,				\
	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
	offsetof(struct gbe_hw_stats, field)				\
}

#define GBE_STATSC_INFO(field)						\
{									\
	"GBE_C:"#field, GBE_STATSC_MODULE,				\
	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
	offsetof(struct gbe_hw_stats, field)				\
}

#define GBE_STATSD_INFO(field)						\
{									\
	"GBE_D:"#field, GBE_STATSD_MODULE,				\
	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
	offsetof(struct gbe_hw_stats, field)				\
}

static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};

/* This is the number of GBENU_STATS_HOST entries */
#define GBENU_ET_STATS_HOST_SIZE	33

#define GBENU_STATS_HOST(field)					\
{								\
	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

/* This is the number of GBENU_STATS_Px entries per slave port */
#define GBENU_ET_STATS_PORT_SIZE	46
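/* gbenu_et_stats below is laid out as GBENU_ET_STATS_HOST_SIZE host-port
 * entries followed by GBENU_ET_STATS_PORT_SIZE entries for each slave port,
 * so the exported list can be sized from the number of ports actually
 * present on a given subsystem.
 */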

#define GBENU_STATS_P1(field)					\
{								\
	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P2(field)					\
{								\
	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P3(field)					\
{								\
	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P4(field)					\
{								\
	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P5(field)					\
{								\
	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P6(field)					\
{								\
	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P7(field)					\
{								\
	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P8(field)					\
{								\
	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(tx_mem_protect_err),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(tx_mem_protect_err),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(tx_mem_protect_err),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(tx_mem_protect_err),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(tx_mem_protect_err),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
	GBENU_STATS_P5(tx_64B_frames),
	GBENU_STATS_P5(tx_65_to_127B_frames),
	GBENU_STATS_P5(tx_128_to_255B_frames),
	GBENU_STATS_P5(tx_256_to_511B_frames),
	GBENU_STATS_P5(tx_512_to_1023B_frames),
	GBENU_STATS_P5(tx_1024B_frames),
	GBENU_STATS_P5(net_bytes),
	GBENU_STATS_P5(rx_bottom_fifo_drop),
	GBENU_STATS_P5(rx_port_mask_drop),
	GBENU_STATS_P5(rx_top_fifo_drop),
	GBENU_STATS_P5(ale_rate_limit_drop),
	GBENU_STATS_P5(ale_vid_ingress_drop),
	GBENU_STATS_P5(ale_da_eq_sa_drop),
	GBENU_STATS_P5(ale_unknown_ucast),
	GBENU_STATS_P5(ale_unknown_ucast_bytes),
	GBENU_STATS_P5(ale_unknown_mcast),
	GBENU_STATS_P5(ale_unknown_mcast_bytes),
	GBENU_STATS_P5(ale_unknown_bcast),
	GBENU_STATS_P5(ale_unknown_bcast_bytes),
	GBENU_STATS_P5(tx_mem_protect_err),
	/* GBENU Module 6 */
	GBENU_STATS_P6(rx_good_frames),
	GBENU_STATS_P6(rx_broadcast_frames),
	GBENU_STATS_P6(rx_multicast_frames),
	GBENU_STATS_P6(rx_pause_frames),
	GBENU_STATS_P6(rx_crc_errors),
	GBENU_STATS_P6(rx_align_code_errors),
	GBENU_STATS_P6(rx_oversized_frames),
	GBENU_STATS_P6(rx_jabber_frames),
	GBENU_STATS_P6(rx_undersized_frames),
	GBENU_STATS_P6(rx_fragments),
	GBENU_STATS_P6(ale_drop),
	GBENU_STATS_P6(ale_overrun_drop),
	GBENU_STATS_P6(rx_bytes),
	GBENU_STATS_P6(tx_good_frames),
	GBENU_STATS_P6(tx_broadcast_frames),
	GBENU_STATS_P6(tx_multicast_frames),
	GBENU_STATS_P6(tx_pause_frames),
	GBENU_STATS_P6(tx_deferred_frames),
	GBENU_STATS_P6(tx_collision_frames),
	GBENU_STATS_P6(tx_single_coll_frames),
	GBENU_STATS_P6(tx_mult_coll_frames),
	GBENU_STATS_P6(tx_excessive_collisions),
	GBENU_STATS_P6(tx_late_collisions),
	GBENU_STATS_P6(rx_ipg_error),
	GBENU_STATS_P6(tx_carrier_sense_errors),
	GBENU_STATS_P6(tx_bytes),
	GBENU_STATS_P6(tx_64B_frames),
	GBENU_STATS_P6(tx_65_to_127B_frames),
	GBENU_STATS_P6(tx_128_to_255B_frames),
	GBENU_STATS_P6(tx_256_to_511B_frames),
	GBENU_STATS_P6(tx_512_to_1023B_frames),
	GBENU_STATS_P6(tx_1024B_frames),
	GBENU_STATS_P6(net_bytes),
	GBENU_STATS_P6(rx_bottom_fifo_drop),
	GBENU_STATS_P6(rx_port_mask_drop),
	GBENU_STATS_P6(rx_top_fifo_drop),
	GBENU_STATS_P6(ale_rate_limit_drop),
	GBENU_STATS_P6(ale_vid_ingress_drop),
	GBENU_STATS_P6(ale_da_eq_sa_drop),
	GBENU_STATS_P6(ale_unknown_ucast),
	GBENU_STATS_P6(ale_unknown_ucast_bytes),
	GBENU_STATS_P6(ale_unknown_mcast),
	GBENU_STATS_P6(ale_unknown_mcast_bytes),
	GBENU_STATS_P6(ale_unknown_bcast),
	GBENU_STATS_P6(ale_unknown_bcast_bytes),
	GBENU_STATS_P6(tx_mem_protect_err),
	/* GBENU Module 7 */
	GBENU_STATS_P7(rx_good_frames),
	GBENU_STATS_P7(rx_broadcast_frames),
	GBENU_STATS_P7(rx_multicast_frames),
	GBENU_STATS_P7(rx_pause_frames),
	GBENU_STATS_P7(rx_crc_errors),
	GBENU_STATS_P7(rx_align_code_errors),
	GBENU_STATS_P7(rx_oversized_frames),
	GBENU_STATS_P7(rx_jabber_frames),
	GBENU_STATS_P7(rx_undersized_frames),
	GBENU_STATS_P7(rx_fragments),
	GBENU_STATS_P7(ale_drop),
	GBENU_STATS_P7(ale_overrun_drop),
	GBENU_STATS_P7(rx_bytes),
	GBENU_STATS_P7(tx_good_frames),
	GBENU_STATS_P7(tx_broadcast_frames),
	GBENU_STATS_P7(tx_multicast_frames),
	GBENU_STATS_P7(tx_pause_frames),
	GBENU_STATS_P7(tx_deferred_frames),
	GBENU_STATS_P7(tx_collision_frames),
	GBENU_STATS_P7(tx_single_coll_frames),
	GBENU_STATS_P7(tx_mult_coll_frames),
	GBENU_STATS_P7(tx_excessive_collisions),
	GBENU_STATS_P7(tx_late_collisions),
	GBENU_STATS_P7(rx_ipg_error),
	GBENU_STATS_P7(tx_carrier_sense_errors),
	GBENU_STATS_P7(tx_bytes),
	GBENU_STATS_P7(tx_64B_frames),
	GBENU_STATS_P7(tx_65_to_127B_frames),
	GBENU_STATS_P7(tx_128_to_255B_frames),
	GBENU_STATS_P7(tx_256_to_511B_frames),
	GBENU_STATS_P7(tx_512_to_1023B_frames),
	GBENU_STATS_P7(tx_1024B_frames),
	GBENU_STATS_P7(net_bytes),
	GBENU_STATS_P7(rx_bottom_fifo_drop),
	GBENU_STATS_P7(rx_port_mask_drop),
	GBENU_STATS_P7(rx_top_fifo_drop),
	GBENU_STATS_P7(ale_rate_limit_drop),
	GBENU_STATS_P7(ale_vid_ingress_drop),
	GBENU_STATS_P7(ale_da_eq_sa_drop),
	GBENU_STATS_P7(ale_unknown_ucast),
	GBENU_STATS_P7(ale_unknown_ucast_bytes),
	GBENU_STATS_P7(ale_unknown_mcast),
	GBENU_STATS_P7(ale_unknown_mcast_bytes),
	GBENU_STATS_P7(ale_unknown_bcast),
	GBENU_STATS_P7(ale_unknown_bcast_bytes),
	GBENU_STATS_P7(tx_mem_protect_err),
	/* GBENU Module 8 */
	GBENU_STATS_P8(rx_good_frames),
	GBENU_STATS_P8(rx_broadcast_frames),
	GBENU_STATS_P8(rx_multicast_frames),
	GBENU_STATS_P8(rx_pause_frames),
	GBENU_STATS_P8(rx_crc_errors),
	GBENU_STATS_P8(rx_align_code_errors),
	GBENU_STATS_P8(rx_oversized_frames),
	GBENU_STATS_P8(rx_jabber_frames),
	GBENU_STATS_P8(rx_undersized_frames),
	GBENU_STATS_P8(rx_fragments),
	GBENU_STATS_P8(ale_drop),
	GBENU_STATS_P8(ale_overrun_drop),
	GBENU_STATS_P8(rx_bytes),
	GBENU_STATS_P8(tx_good_frames),
	GBENU_STATS_P8(tx_broadcast_frames),
	GBENU_STATS_P8(tx_multicast_frames),
	GBENU_STATS_P8(tx_pause_frames),
	GBENU_STATS_P8(tx_deferred_frames),
	GBENU_STATS_P8(tx_collision_frames),
	GBENU_STATS_P8(tx_single_coll_frames),
	GBENU_STATS_P8(tx_mult_coll_frames),
	GBENU_STATS_P8(tx_excessive_collisions),
	GBENU_STATS_P8(tx_late_collisions),
	GBENU_STATS_P8(rx_ipg_error),
	GBENU_STATS_P8(tx_carrier_sense_errors),
	GBENU_STATS_P8(tx_bytes),
	GBENU_STATS_P8(tx_64B_frames),
	GBENU_STATS_P8(tx_65_to_127B_frames),
	GBENU_STATS_P8(tx_128_to_255B_frames),
	GBENU_STATS_P8(tx_256_to_511B_frames),
	GBENU_STATS_P8(tx_512_to_1023B_frames),
	GBENU_STATS_P8(tx_1024B_frames),
	GBENU_STATS_P8(net_bytes),
	GBENU_STATS_P8(rx_bottom_fifo_drop),
	GBENU_STATS_P8(rx_port_mask_drop),
	GBENU_STATS_P8(rx_top_fifo_drop),
	GBENU_STATS_P8(ale_rate_limit_drop),
	GBENU_STATS_P8(ale_vid_ingress_drop),
	GBENU_STATS_P8(ale_da_eq_sa_drop),
	GBENU_STATS_P8(ale_unknown_ucast),
	GBENU_STATS_P8(ale_unknown_ucast_bytes),
	GBENU_STATS_P8(ale_unknown_mcast),
	GBENU_STATS_P8(ale_unknown_mcast_bytes),
	GBENU_STATS_P8(ale_unknown_bcast),
	GBENU_STATS_P8(ale_unknown_bcast_bytes),
	GBENU_STATS_P8(tx_mem_protect_err),
};

#define XGBE_STATS0_INFO(field)				\
{							\
	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS1_INFO(field)				\
{							\
	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS2_INFO(field)				\
{							\
	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};

#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv)					\
	list_first_entry(&priv->secondary_slaves, \
			struct gbe_slave, slave_list)

static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}

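/* Hardware counters are 32 bit: each one is read, accumulated into the
 * driver's 64-bit hw_stats[] shadow and then written back with the value
 * just read, which subtracts it from the register (write-to-decrement).
 * Increments that arrive during the readout are preserved and rollover
 * between reads is avoided.
 */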
static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32  __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}

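/* Version 1.4 hardware exposes only two hw stats windows at a time; modules
 * C and D are reached by setting GBE_STATS_CD_SEL in stat_port_en, so the
 * readout is done in two passes over the two halves of the stats table.
 */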
static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32  __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			j = pair * pair_size + i;
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
			break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base  = gbe_statsb;
			break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}

static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	gbe_dev = gbe_intf->gbe_dev;
	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}

static int keystone_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	ret = phy_ethtool_gset(phy, cmd);
	if (!ret)
		cmd->port = gbe_intf->slave->phy_port_t;

	return ret;
}

static int keystone_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	u32 features = cmd->advertising & cmd->supported;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	if (cmd->port != gbe_intf->slave->phy_port_t) {
		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
			return -EINVAL;

		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
			return -EINVAL;

		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
			return -EINVAL;

		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
			return -EINVAL;

		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
			return -EINVAL;
	}

	gbe_intf->slave->phy_port_t = cmd->port;
	return phy_ethtool_sset(phy, cmd);
}

static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_settings		= keystone_get_settings,
	.set_settings		= keystone_set_settings,
};

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
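/* The station address is programmed as two words: mac_hi() packs the first
 * four bytes of the MAC address for the SA_HI register and mac_lo() the
 * last two bytes for SA_LO, byte 0 in the least significant position.
 */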
1725
1726static void gbe_set_slave_mac(struct gbe_slave *slave,
1727			      struct gbe_intf *gbe_intf)
1728{
1729	struct net_device *ndev = gbe_intf->ndev;
1730
1731	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1732	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1733}
1734
static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;

	return slave_num;
}

static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_on(ndev);
	} else {
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}

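/* A slave without an attached PHY is treated as always having link up */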
static bool gbe_phy_link_status(struct gbe_slave *slave)
{
	return !slave->phy || slave->phy->link;
}

static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			sgmii_link_state =
				netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
		else
			sgmii_link_state =
				netcp_sgmii_get_port_link(
						gbe_dev->sgmii_port_regs, sp);
	}

	phy_link_state = gbe_phy_link_status(slave);
	link_state = phy_link_state & sgmii_link_state;

	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}

static void xgbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}

/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}

/* Configure EMAC */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	void __iomem *rx_maxlen_reg;
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	if (IS_SS_ID_MU(gbe_dev))
		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
	else
		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);

	writel(max_rx_len, rx_maxlen_reg);
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}

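/* Quiesce a slave port: reset the EMAC, block forwarding in the ALE and
 * detach any connected PHY.
 */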
static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
	void __iomem *sgmii_port_regs;

	sgmii_port_regs = priv->sgmii_port_regs;
	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
		sgmii_port_regs = priv->sgmii_port34_regs;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
				   slave->link_interface);
	}
}

static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);
	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			dev_name(&slave->phy->dev));
		phy_start(slave->phy);
		phy_read_status(slave->phy);
	}
	return 0;
}

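/* Program the host (CPPI) port defaults and bring up the ALE in either
 * bypass or lookup mode with the default VLAN and flooding masks.
 */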
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Host Tx Pri */
	if (IS_SS_ID_NU(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}

static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}

static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}

static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}

static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}

static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_add_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_add_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_del_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_del_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}

static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}

static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct phy_device *phy = gbe_intf->slave->phy;
	int ret = -EOPNOTSUPP;

	if (phy)
		ret = phy_mii_ioctl(phy, req, cmd);

	return ret;
}

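/* Periodic timer: refresh the link state of all interfaces and secondary
 * slaves and accumulate the hardware statistics counters.
 */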
static void netcp_ethss_timer(unsigned long arg)
{
	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	spin_lock_bh(&gbe_dev->hw_stats_lock);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}

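/* TX hook: steer packets from this interface into the switch TX pipe */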
static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}

static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* For 10G and on NetCP 1.5, use directed to port */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;

	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.switch_to_port = 0;
	else
		gbe_intf->tx_pipe.switch_to_port = port_num;

	dev_dbg(gbe_dev->dev,
		"opened TX channel %s: %p to port %d, flags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.switch_to_port,
		gbe_intf->tx_pipe.flags);

	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
						    stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}

static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);

	gbe_slave_stop(gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
				gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}

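/* Parse a slave port device tree node and derive its port and EMAC
 * register block addresses for the detected subsystem version.
 */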
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value, defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->open = false;
	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* EMAC register blocks are contiguous, but port register blocks
	 * are not.
	 */
	port_reg_num = slave->slave_num;
	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
				(port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
				(emac_reg_blk_sz * slave->slave_num);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}

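/* Set up "secondary" slave ports, i.e. switch ports that are not exposed
 * as network interfaces but still need SGMII/EMAC configuration and, for
 * MAC-PHY links, a PHY attached through a dummy netdev.
 */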
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev,
				"memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
			slave->phy = NULL;
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				dev_name(&slave->phy->dev));
			phy_start(slave->phy);
			phy_read_status(slave->phy);
		}
	}
}

static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	for (;;) {
		slave = first_sec_slave(gbe_dev);
		if (!slave)
			break;
		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}

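/* Map the 10G subsystem, switch module and SerDes register regions and
 * fill in the XGBE-specific offsets and statistics tables.
 */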
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate xgbe node(%s) ss address at index %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate xgbe node(%s) sm address at index %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate xgbe node(%s) serdes address at index %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
				  XGBE10_NUM_STAT_ENTRIES *
				  (gbe_dev->max_num_ports) * sizeof(u64),
				  GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
					XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate gbe node(%s) ss address at index %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}

static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					  GBE13_NUM_HW_STAT_ENTRIES *
					  gbe_dev->max_num_slaves * sizeof(u64),
					  GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);
	}

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
				  GBENU_NUM_HW_STAT_ENTRIES *
				  (gbe_dev->max_num_ports) * sizeof(u64),
				  GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate gbenu node(%s) address at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbenu_et_stats;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	if (IS_SS_ID_NU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}

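/* Probe: identify the subsystem from the device tree, map its registers,
 * set up the TX pipe, primary and secondary slave ports, the ALE engine
 * and the periodic link/statistics timer.
 */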
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces = NULL, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled*\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= gbe_dev->dev;
	ale_params.ale_regs	= gbe_dev->ale_reg;
	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries	= gbe_dev->ale_entries;
	ale_params.ale_ports	= gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data	= (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}

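/* Attach a network interface to its slave port: allocate the per-interface
 * state, parse the interface device tree node and hook up the ethtool ops.
 */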
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
					sizeof(*gbe_intf->slave),
					GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct device *dev = gbe_dev->dev;

	del_timer_sync(&gbe_dev->timer);
	cpsw_ale_stop(gbe_dev->ale);
	cpsw_ale_destroy(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(dev, "unreleased ethss interfaces present\n");

	devm_kfree(dev, gbe_dev->hw_stats);
	devm_iounmap(dev, gbe_dev->ss_regs);
	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
	devm_kfree(dev, gbe_dev);
	return 0;
}

static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		netcp_unregister_module(&gbe_module);
		return ret;
	}

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");