/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.6"
#define DRV_MODULE_RELDATE	"January 29, 2014"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

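/* Return the number of TX descriptors available on the ring.  tx_prod
 * and tx_cons are free-running 16-bit indices, and each ring page has
 * one descriptor that must be skipped (256 indices cover 255 entries),
 * so the raw difference is masked and adjusted before use.
 */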
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

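/* Indirect register access: the target offset is written to the PCI
 * config window address register and the data then moves through the
 * window data register.  indirect_lock serializes users of the shared
 * window.
 */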
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

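/* Write one 32-bit word of on-chip context memory.  The 5709 writes
 * through the CTX host controller interface and polls briefly for the
 * write request to complete; older chips use the simple context
 * data address/data register pair.
 */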
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

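/* Describe to the CNIC driver which IRQ vector and status block it
 * should use.  With MSI-X the CNIC gets the vector and status block
 * immediately after those used by the ethernet driver; otherwise it
 * shares vector 0 and tracks the shared status block via cnic_tag.
 */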
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

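/* MDIO access to the PHY.  Auto-polling is temporarily turned off
 * around the access, and the START_BUSY bit is polled for up to
 * 500 usec before the access is abandoned with -EBUSY.
 */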
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

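/* Mask interrupts on every vector; the trailing read flushes the
 * posted writes to the interrupt ack command register.
 */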
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

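/* Quiesce the device: optionally stop the CNIC, disable NAPI and the
 * TX queues, then mask and synchronize interrupts.  Carrier is forced
 * off so the stack does not declare a TX timeout while we are stopped.
 */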
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

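/* Allocate all device memory: the combined status/statistics block,
 * context memory for the 5709, and the TX/RX rings.  On any failure
 * everything allocated so far is freed and -ENOMEM is returned.
 */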
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

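/* Work out the active flow control settings.  The forced settings are
 * used unless both speed and flow control are autonegotiated; in that
 * case the local and remote pause advertisements are resolved per
 * Table 28B-3 of the 802.3 spec (the 5708 SerDes reports the result
 * directly).
 */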
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

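/* Program the EMAC to match the current link state: transmit lengths,
 * port mode and duplex, and the RX/TX PAUSE enables.  The link change
 * interrupt is then acknowledged and the RX contexts are refreshed so
 * they pick up the new TX flow control setting.
 */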
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

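/* Re-evaluate the link state from the PHY and update the MAC to
 * match.  The status register is read twice because the link bit is
 * latched; a 5706 SerDes additionally cross-checks the EMAC link and
 * sync status to catch a dropped link.
 */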
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

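/* Translate the requested flow control mode into pause advertisement
 * bits, using the 1000BASE-X encoding for SerDes PHYs and the
 * standard copper encoding otherwise.
 */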
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}

2056static int
2057bnx2_setup_copper_phy(struct bnx2 *bp)
2058__releases(&bp->phy_lock)
2059__acquires(&bp->phy_lock)
2060{
2061	u32 bmcr, adv_reg, new_adv = 0;
2062	u32 new_bmcr;
2063
2064	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2065
2066	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2067	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2068		    ADVERTISE_PAUSE_ASYM);
2069
2070	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2071
2072	if (bp->autoneg & AUTONEG_SPEED) {
2073		u32 adv1000_reg;
2074		u32 new_adv1000 = 0;
2075
2076		new_adv |= bnx2_phy_get_pause_adv(bp);
2077
2078		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2079		adv1000_reg &= PHY_ALL_1000_SPEED;
2080
2081		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2082		if ((adv1000_reg != new_adv1000) ||
2083			(adv_reg != new_adv) ||
2084			((bmcr & BMCR_ANENABLE) == 0)) {
2085
2086			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2087			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2088			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2089				BMCR_ANENABLE);
2090		}
2091		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
2094
2095			bnx2_resolve_flow_ctrl(bp);
2096			bnx2_set_mac_link(bp);
2097		}
2098		return 0;
2099	}
2100
2101	/* advertise nothing when forcing speed */
2102	if (adv_reg != new_adv)
2103		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2104
2105	new_bmcr = 0;
2106	if (bp->req_line_speed == SPEED_100) {
2107		new_bmcr |= BMCR_SPEED100;
2108	}
2109	if (bp->req_duplex == DUPLEX_FULL) {
2110		new_bmcr |= BMCR_FULLDPLX;
2111	}
2112	if (new_bmcr != bmcr) {
2113		u32 bmsr;
2114
2115		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117
2118		if (bmsr & BMSR_LSTATUS) {
2119			/* Force link down */
2120			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2121			spin_unlock_bh(&bp->phy_lock);
2122			msleep(50);
2123			spin_lock_bh(&bp->phy_lock);
2124
2125			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2127		}
2128
2129		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2130
		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, the link will not
		 * go down, so we need to set up the new speed here.
		 */
2135		if (bmsr & BMSR_LSTATUS) {
2136			bp->line_speed = bp->req_line_speed;
2137			bp->duplex = bp->req_duplex;
2138			bnx2_resolve_flow_ctrl(bp);
2139			bnx2_set_mac_link(bp);
2140		}
2141	} else {
2142		bnx2_resolve_flow_ctrl(bp);
2143		bnx2_set_mac_link(bp);
2144	}
2145	return 0;
2146}
2147
2148static int
2149bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150__releases(&bp->phy_lock)
2151__acquires(&bp->phy_lock)
2152{
2153	if (bp->loopback == MAC_LOOPBACK)
2154		return 0;
2155
2156	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157		return bnx2_setup_serdes_phy(bp, port);
2158	}
2159	else {
2160		return bnx2_setup_copper_phy(bp);
2161	}
2162}
2163
2164static int
2165bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2166{
2167	u32 val;
2168
2169	bp->mii_bmcr = MII_BMCR + 0x10;
2170	bp->mii_bmsr = MII_BMSR + 0x10;
2171	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2172	bp->mii_adv = MII_ADVERTISE + 0x10;
2173	bp->mii_lpa = MII_LPA + 0x10;
2174	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2175
2176	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2177	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2178
2179	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2180	if (reset_phy)
2181		bnx2_reset_phy(bp);
2182
2183	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2184
2185	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2186	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2187	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2188	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2189
2190	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2191	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2192	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2193		val |= BCM5708S_UP1_2G5;
2194	else
2195		val &= ~BCM5708S_UP1_2G5;
2196	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2197
2198	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2199	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2200	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2201	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2202
2203	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2204
2205	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2206	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2207	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2208
2209	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2210
2211	return 0;
2212}
2213
2214static int
2215bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2216{
2217	u32 val;
2218
2219	if (reset_phy)
2220		bnx2_reset_phy(bp);
2221
2222	bp->mii_up1 = BCM5708S_UP1;
2223
2224	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2225	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2226	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2227
2228	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2229	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2230	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2231
2232	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2233	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2234	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2235
2236	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2237		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2238		val |= BCM5708S_UP1_2G5;
2239		bnx2_write_phy(bp, BCM5708S_UP1, val);
2240	}
2241
2242	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2243	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2244	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2245		/* increase tx signal amplitude */
2246		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2247			       BCM5708S_BLK_ADDR_TX_MISC);
2248		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2249		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2250		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2251		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252	}
2253
2254	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2255	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2256
2257	if (val) {
2258		u32 is_backplane;
2259
2260		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2261		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2262			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263				       BCM5708S_BLK_ADDR_TX_MISC);
2264			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2265			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266				       BCM5708S_BLK_ADDR_DIG);
2267		}
2268	}
2269	return 0;
2270}
2271
2272static int
2273bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2274{
2275	if (reset_phy)
2276		bnx2_reset_phy(bp);
2277
2278	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2279
2280	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2281		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2282
2283	if (bp->dev->mtu > 1500) {
2284		u32 val;
2285
2286		/* Set extended packet length bit */
2287		bnx2_write_phy(bp, 0x18, 0x7);
2288		bnx2_read_phy(bp, 0x18, &val);
2289		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290
2291		bnx2_write_phy(bp, 0x1c, 0x6c00);
2292		bnx2_read_phy(bp, 0x1c, &val);
2293		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294	}
2295	else {
2296		u32 val;
2297
2298		bnx2_write_phy(bp, 0x18, 0x7);
2299		bnx2_read_phy(bp, 0x18, &val);
2300		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301
2302		bnx2_write_phy(bp, 0x1c, 0x6c00);
2303		bnx2_read_phy(bp, 0x1c, &val);
2304		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305	}
2306
2307	return 0;
2308}
2309
2310static int
2311bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2312{
2313	u32 val;
2314
2315	if (reset_phy)
2316		bnx2_reset_phy(bp);
2317
2318	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2319		bnx2_write_phy(bp, 0x18, 0x0c00);
2320		bnx2_write_phy(bp, 0x17, 0x000a);
2321		bnx2_write_phy(bp, 0x15, 0x310b);
2322		bnx2_write_phy(bp, 0x17, 0x201f);
2323		bnx2_write_phy(bp, 0x15, 0x9506);
2324		bnx2_write_phy(bp, 0x17, 0x401f);
2325		bnx2_write_phy(bp, 0x15, 0x14e2);
2326		bnx2_write_phy(bp, 0x18, 0x0400);
2327	}
2328
2329	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2330		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2331			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2332		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2333		val &= ~(1 << 8);
2334		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335	}
2336
2337	if (bp->dev->mtu > 1500) {
2338		/* Set extended packet length bit */
2339		bnx2_write_phy(bp, 0x18, 0x7);
2340		bnx2_read_phy(bp, 0x18, &val);
2341		bnx2_write_phy(bp, 0x18, val | 0x4000);
2342
2343		bnx2_read_phy(bp, 0x10, &val);
2344		bnx2_write_phy(bp, 0x10, val | 0x1);
2345	}
2346	else {
2347		bnx2_write_phy(bp, 0x18, 0x7);
2348		bnx2_read_phy(bp, 0x18, &val);
2349		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350
2351		bnx2_read_phy(bp, 0x10, &val);
2352		bnx2_write_phy(bp, 0x10, val & ~0x1);
2353	}
2354
2355	/* ethernet@wirespeed */
2356	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2357	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2358	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2359
2360	/* auto-mdix */
2361	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2362		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2363
2364	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2365	return 0;
2366}
2367
2368
2369static int
2370bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2371__releases(&bp->phy_lock)
2372__acquires(&bp->phy_lock)
2373{
2374	u32 val;
2375	int rc = 0;
2376
2377	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2379
2380	bp->mii_bmcr = MII_BMCR;
2381	bp->mii_bmsr = MII_BMSR;
2382	bp->mii_bmsr1 = MII_BMSR;
2383	bp->mii_adv = MII_ADVERTISE;
2384	bp->mii_lpa = MII_LPA;
2385
2386	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2387
2388	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2389		goto setup_phy;
2390
2391	bnx2_read_phy(bp, MII_PHYSID1, &val);
2392	bp->phy_id = val << 16;
2393	bnx2_read_phy(bp, MII_PHYSID2, &val);
2394	bp->phy_id |= val & 0xffff;
2395
2396	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2397		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2398			rc = bnx2_init_5706s_phy(bp, reset_phy);
2399		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2400			rc = bnx2_init_5708s_phy(bp, reset_phy);
2401		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2402			rc = bnx2_init_5709s_phy(bp, reset_phy);
2403	}
2404	else {
2405		rc = bnx2_init_copper_phy(bp, reset_phy);
2406	}
2407
2408setup_phy:
2409	if (!rc)
2410		rc = bnx2_setup_phy(bp, bp->phy_port);
2411
2412	return rc;
2413}
2414
2415static int
2416bnx2_set_mac_loopback(struct bnx2 *bp)
2417{
2418	u32 mac_mode;
2419
2420	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2421	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2423	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2424	bp->link_up = 1;
2425	return 0;
2426}
2427
2428static int bnx2_test_link(struct bnx2 *);
2429
2430static int
2431bnx2_set_phy_loopback(struct bnx2 *bp)
2432{
2433	u32 mac_mode;
2434	int rc, i;
2435
2436	spin_lock_bh(&bp->phy_lock);
2437	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2438			    BMCR_SPEED1000);
2439	spin_unlock_bh(&bp->phy_lock);
2440	if (rc)
2441		return rc;
2442
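	/* Wait up to about a second for the forced loopback link to come up. */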
2443	for (i = 0; i < 10; i++) {
2444		if (bnx2_test_link(bp) == 0)
2445			break;
2446		msleep(100);
2447	}
2448
2449	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2450	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2452		      BNX2_EMAC_MODE_25G_MODE);
2453
2454	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2455	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2456	bp->link_up = 1;
2457	return 0;
2458}
2459
2460static void
2461bnx2_dump_mcp_state(struct bnx2 *bp)
2462{
2463	struct net_device *dev = bp->dev;
2464	u32 mcp_p0, mcp_p1;
2465
2466	netdev_err(dev, "<--- start MCP states dump --->\n");
2467	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2468		mcp_p0 = BNX2_MCP_STATE_P0;
2469		mcp_p1 = BNX2_MCP_STATE_P1;
2470	} else {
2471		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2472		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2473	}
2474	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2475		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2476	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2477		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2478		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2479		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
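	/* The program counter is read twice, presumably so the dump shows
	 * whether the MCP is making progress or is stuck.
	 */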
2480	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2481		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2483		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2484	netdev_err(dev, "DEBUG: shmem states:\n");
2485	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2486		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2487		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2488		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2489	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2490	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2491		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2492		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2493	pr_cont(" condition[%08x]\n",
2494		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2495	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2496	DP_SHMEM_LINE(bp, 0x3cc);
2497	DP_SHMEM_LINE(bp, 0x3dc);
2498	DP_SHMEM_LINE(bp, 0x3ec);
2499	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2500	netdev_err(dev, "<--- end MCP states dump --->\n");
2501}
2502
2503static int
2504bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2505{
2506	int i;
2507	u32 val;
2508
2509	bp->fw_wr_seq++;
2510	msg_data |= bp->fw_wr_seq;
2511	bp->fw_last_msg = msg_data;
2512
2513	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2514
2515	if (!ack)
2516		return 0;
2517
2518	/* wait for an acknowledgement. */
2519	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2520		msleep(10);
2521
2522		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2523
2524		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2525			break;
2526	}
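	/* WAIT0 messages appear to be best-effort: a late or missing ack
	 * is not treated as an error (hence the early return here).
	 */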
2527	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2528		return 0;
2529
2530	/* If we timed out, inform the firmware that this is the case. */
2531	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2532		msg_data &= ~BNX2_DRV_MSG_CODE;
2533		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2534
2535		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2536		if (!silent) {
2537			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2538			bnx2_dump_mcp_state(bp);
2539		}
2540
2541		return -EBUSY;
2542	}
2543
2544	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2545		return -EIO;
2546
2547	return 0;
2548}
2549
2550static int
2551bnx2_init_5709_context(struct bnx2 *bp)
2552{
2553	int i, ret = 0;
2554	u32 val;
2555
2556	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2557	val |= (BNX2_PAGE_BITS - 8) << 16;
2558	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2559	for (i = 0; i < 10; i++) {
2560		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2561		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2562			break;
2563		udelay(2);
2564	}
2565	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2566		return -EBUSY;
2567
2568	for (i = 0; i < bp->ctx_pages; i++) {
2569		int j;
2570
2571		if (bp->ctx_blk[i])
2572			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2573		else
2574			return -ENOMEM;
2575
2576		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2577			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2578			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2579		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2580			(u64) bp->ctx_blk_mapping[i] >> 32);
2581		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2582			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2583		for (j = 0; j < 10; j++) {
2584
2585			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2586			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2587				break;
2588			udelay(5);
2589		}
2590		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2591			ret = -EBUSY;
2592			break;
2593		}
2594	}
2595	return ret;
2596}
2597
2598static void
2599bnx2_init_context(struct bnx2 *bp)
2600{
2601	u32 vcid;
2602
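	/* Walk all 96 context IDs, mapping each virtual CID to its
	 * physical CID (5706 A0 remaps some of them) and zeroing the
	 * context memory.
	 */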
2603	vcid = 96;
2604	while (vcid) {
2605		u32 vcid_addr, pcid_addr, offset;
2606		int i;
2607
2608		vcid--;
2609
2610		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2611			u32 new_vcid;
2612
2613			vcid_addr = GET_PCID_ADDR(vcid);
2614			if (vcid & 0x8) {
2615				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2616			}
2617			else {
2618				new_vcid = vcid;
2619			}
2620			pcid_addr = GET_PCID_ADDR(new_vcid);
2621		}
2622		else {
			vcid_addr = GET_CID_ADDR(vcid);
2624			pcid_addr = vcid_addr;
2625		}
2626
2627		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2628			vcid_addr += (i << PHY_CTX_SHIFT);
2629			pcid_addr += (i << PHY_CTX_SHIFT);
2630
2631			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2632			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2633
2634			/* Zero out the context. */
2635			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2636				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2637		}
2638	}
2639}
2640
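/* Work around chips with bad on-chip RX buffer memory: allocate every
 * free mbuf cluster, keep the ones whose addresses test good, and free
 * only those back, leaving the bad clusters permanently allocated and
 * therefore never used.
 */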
2641static int
2642bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2643{
2644	u16 *good_mbuf;
2645	u32 good_mbuf_cnt;
2646	u32 val;
2647
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
2650		return -ENOMEM;
2651
2652	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2653		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2654
2655	good_mbuf_cnt = 0;
2656
2657	/* Allocate a bunch of mbufs and save the good ones in an array. */
2658	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2659	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2660		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2661				BNX2_RBUF_COMMAND_ALLOC_REQ);
2662
2663		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2664
2665		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2666
2667		/* The addresses with Bit 9 set are bad memory blocks. */
2668		if (!(val & (1 << 9))) {
2669			good_mbuf[good_mbuf_cnt] = (u16) val;
2670			good_mbuf_cnt++;
2671		}
2672
2673		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2674	}
2675
	/* Free the good ones back to the mbuf pool, thus discarding
	 * all the bad ones.
	 */
2678	while (good_mbuf_cnt) {
2679		good_mbuf_cnt--;
2680
2681		val = good_mbuf[good_mbuf_cnt];
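		/* Free command encoding assumed from the allocation path
		 * above: the cluster index is replicated above the
		 * bad-block bit position, with the low valid bit set.
		 */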
2682		val = (val << 9) | val | 1;
2683
2684		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2685	}
2686	kfree(good_mbuf);
2687	return 0;
2688}
2689
2690static void
2691bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2692{
2693	u32 val;
2694
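	/* The MAC address is split across a register pair: the first two
	 * bytes go in MATCH0, the remaining four in MATCH1.
	 */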
2695	val = (mac_addr[0] << 8) | mac_addr[1];
2696
2697	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2698
2699	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2700		(mac_addr[4] << 8) | mac_addr[5];
2701
2702	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2703}
2704
2705static inline int
2706bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2707{
2708	dma_addr_t mapping;
2709	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2710	struct bnx2_rx_bd *rxbd =
2711		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2712	struct page *page = alloc_page(gfp);
2713
2714	if (!page)
2715		return -ENOMEM;
2716	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2717			       PCI_DMA_FROMDEVICE);
2718	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2719		__free_page(page);
2720		return -EIO;
2721	}
2722
2723	rx_pg->page = page;
2724	dma_unmap_addr_set(rx_pg, mapping, mapping);
2725	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2726	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2727	return 0;
2728}
2729
2730static void
2731bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2732{
2733	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2734	struct page *page = rx_pg->page;
2735
2736	if (!page)
2737		return;
2738
2739	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2740		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2741
2742	__free_page(page);
2743	rx_pg->page = NULL;
2744}
2745
2746static inline int
2747bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2748{
2749	u8 *data;
2750	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2751	dma_addr_t mapping;
2752	struct bnx2_rx_bd *rxbd =
2753		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2754
2755	data = kmalloc(bp->rx_buf_size, gfp);
2756	if (!data)
2757		return -ENOMEM;
2758
2759	mapping = dma_map_single(&bp->pdev->dev,
2760				 get_l2_fhdr(data),
2761				 bp->rx_buf_use_size,
2762				 PCI_DMA_FROMDEVICE);
2763	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2764		kfree(data);
2765		return -EIO;
2766	}
2767
2768	rx_buf->data = data;
2769	dma_unmap_addr_set(rx_buf, mapping, mapping);
2770
2771	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2772	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2773
2774	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2775
2776	return 0;
2777}
2778
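/* An attention event is pending when the event bit differs between the
 * status and ack words; ack it by setting or clearing the bit to match
 * the new state.
 */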
2779static int
2780bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2781{
2782	struct status_block *sblk = bnapi->status_blk.msi;
2783	u32 new_link_state, old_link_state;
2784	int is_set = 1;
2785
2786	new_link_state = sblk->status_attn_bits & event;
2787	old_link_state = sblk->status_attn_bits_ack & event;
2788	if (new_link_state != old_link_state) {
2789		if (new_link_state)
2790			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2791		else
2792			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2793	} else
2794		is_set = 0;
2795
2796	return is_set;
2797}
2798
2799static void
2800bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2801{
2802	spin_lock(&bp->phy_lock);
2803
2804	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2805		bnx2_set_link(bp);
2806	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2807		bnx2_set_remote_link(bp);
2808
2809	spin_unlock(&bp->phy_lock);
2810
2811}
2812
2813static inline u16
2814bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2815{
2816	u16 cons;
2817
2818	/* Tell compiler that status block fields can change. */
2819	barrier();
2820	cons = *bnapi->hw_tx_cons_ptr;
2821	barrier();
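	/* The last entry in each ring page is a next-page pointer, not a
	 * usable BD, so step over it when the consumer index lands there.
	 */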
2822	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2823		cons++;
2824	return cons;
2825}
2826
2827static int
2828bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2829{
2830	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2831	u16 hw_cons, sw_cons, sw_ring_cons;
2832	int tx_pkt = 0, index;
2833	unsigned int tx_bytes = 0;
2834	struct netdev_queue *txq;
2835
2836	index = (bnapi - bp->bnx2_napi);
2837	txq = netdev_get_tx_queue(bp->dev, index);
2838
2839	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2840	sw_cons = txr->tx_cons;
2841
2842	while (sw_cons != hw_cons) {
2843		struct bnx2_sw_tx_bd *tx_buf;
2844		struct sk_buff *skb;
2845		int i, last;
2846
2847		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2848
2849		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2850		skb = tx_buf->skb;
2851
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2853		prefetch(&skb->end);
2854
2855		/* partial BD completions possible with TSO packets */
2856		if (tx_buf->is_gso) {
2857			u16 last_idx, last_ring_idx;
2858
2859			last_idx = sw_cons + tx_buf->nr_frags + 1;
2860			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2861			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2862				last_idx++;
2863			}
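			/* Signed 16-bit compare so producer/consumer index
			 * wraparound is handled correctly.
			 */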
2864			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2865				break;
2866			}
2867		}
2868
2869		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2870			skb_headlen(skb), PCI_DMA_TODEVICE);
2871
2872		tx_buf->skb = NULL;
2873		last = tx_buf->nr_frags;
2874
2875		for (i = 0; i < last; i++) {
2876			struct bnx2_sw_tx_bd *tx_buf;
2877
2878			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2879
2880			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2881			dma_unmap_page(&bp->pdev->dev,
2882				dma_unmap_addr(tx_buf, mapping),
2883				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2884				PCI_DMA_TODEVICE);
2885		}
2886
2887		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2888
2889		tx_bytes += skb->len;
2890		dev_kfree_skb_any(skb);
2891		tx_pkt++;
2892		if (tx_pkt == budget)
2893			break;
2894
2895		if (hw_cons == sw_cons)
2896			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2897	}
2898
2899	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2900	txr->hw_tx_cons = hw_cons;
2901	txr->tx_cons = sw_cons;
2902
2903	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2904	 * before checking for netif_tx_queue_stopped().  Without the
2905	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2906	 * will miss it and cause the queue to be stopped forever.
2907	 */
2908	smp_mb();
2909
2910	if (unlikely(netif_tx_queue_stopped(txq)) &&
2911		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2912		__netif_tx_lock(txq, smp_processor_id());
2913		if ((netif_tx_queue_stopped(txq)) &&
2914		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2915			netif_tx_wake_queue(txq);
2916		__netif_tx_unlock(txq);
2917	}
2918
2919	return tx_pkt;
2920}
2921
2922static void
2923bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2924			struct sk_buff *skb, int count)
2925{
2926	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2927	struct bnx2_rx_bd *cons_bd, *prod_bd;
2928	int i;
2929	u16 hw_prod, prod;
2930	u16 cons = rxr->rx_pg_cons;
2931
2932	cons_rx_pg = &rxr->rx_pg_ring[cons];
2933
2934	/* The caller was unable to allocate a new page to replace the
2935	 * last one in the frags array, so we need to recycle that page
2936	 * and then free the skb.
2937	 */
2938	if (skb) {
2939		struct page *page;
2940		struct skb_shared_info *shinfo;
2941
2942		shinfo = skb_shinfo(skb);
2943		shinfo->nr_frags--;
2944		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2945		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2946
2947		cons_rx_pg->page = page;
2948		dev_kfree_skb(skb);
2949	}
2950
2951	hw_prod = rxr->rx_pg_prod;
2952
2953	for (i = 0; i < count; i++) {
2954		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2955
2956		prod_rx_pg = &rxr->rx_pg_ring[prod];
2957		cons_rx_pg = &rxr->rx_pg_ring[cons];
2958		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2959						[BNX2_RX_IDX(cons)];
2960		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2961						[BNX2_RX_IDX(prod)];
2962
2963		if (prod != cons) {
2964			prod_rx_pg->page = cons_rx_pg->page;
2965			cons_rx_pg->page = NULL;
2966			dma_unmap_addr_set(prod_rx_pg, mapping,
2967				dma_unmap_addr(cons_rx_pg, mapping));
2968
2969			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2970			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2971
2972		}
2973		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2974		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2975	}
2976	rxr->rx_pg_prod = hw_prod;
2977	rxr->rx_pg_cons = cons;
2978}
2979
2980static inline void
2981bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2982		   u8 *data, u16 cons, u16 prod)
2983{
2984	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2985	struct bnx2_rx_bd *cons_bd, *prod_bd;
2986
2987	cons_rx_buf = &rxr->rx_buf_ring[cons];
2988	prod_rx_buf = &rxr->rx_buf_ring[prod];
2989
2990	dma_sync_single_for_device(&bp->pdev->dev,
2991		dma_unmap_addr(cons_rx_buf, mapping),
2992		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2993
2994	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2995
2996	prod_rx_buf->data = data;
2997
2998	if (cons == prod)
2999		return;
3000
3001	dma_unmap_addr_set(prod_rx_buf, mapping,
3002			dma_unmap_addr(cons_rx_buf, mapping));
3003
3004	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3005	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3006	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3007	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3008}
3009
3010static struct sk_buff *
3011bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3012	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3013	    u32 ring_idx)
3014{
3015	int err;
3016	u16 prod = ring_idx & 0xffff;
3017	struct sk_buff *skb;
3018
3019	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3020	if (unlikely(err)) {
3021		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
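		/* The error label is also reached from the build_skb()
		 * failure path below.
		 */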
3022error:
3023		if (hdr_len) {
3024			unsigned int raw_len = len + 4;
3025			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3026
3027			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3028		}
3029		return NULL;
3030	}
3031
3032	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3033			 PCI_DMA_FROMDEVICE);
3034	skb = build_skb(data, 0);
3035	if (!skb) {
3036		kfree(data);
3037		goto error;
3038	}
3039	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3040	if (hdr_len == 0) {
3041		skb_put(skb, len);
3042		return skb;
3043	} else {
3044		unsigned int i, frag_len, frag_size, pages;
3045		struct bnx2_sw_pg *rx_pg;
3046		u16 pg_cons = rxr->rx_pg_cons;
3047		u16 pg_prod = rxr->rx_pg_prod;
3048
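		/* len excludes the FCS (see the len -= 4 in bnx2_rx_int()),
		 * but the chip DMAs the FCS into the page frags as well, so
		 * add it back when sizing the frag area; the last page's
		 * frag_len is trimmed by 4 below.
		 */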
3049		frag_size = len + 4 - hdr_len;
3050		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3051		skb_put(skb, hdr_len);
3052
3053		for (i = 0; i < pages; i++) {
3054			dma_addr_t mapping_old;
3055
3056			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3057			if (unlikely(frag_len <= 4)) {
3058				unsigned int tail = 4 - frag_len;
3059
3060				rxr->rx_pg_cons = pg_cons;
3061				rxr->rx_pg_prod = pg_prod;
3062				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3063							pages - i);
3064				skb->len -= tail;
3065				if (i == 0) {
3066					skb->tail -= tail;
3067				} else {
3068					skb_frag_t *frag =
3069						&skb_shinfo(skb)->frags[i - 1];
3070					skb_frag_size_sub(frag, tail);
3071					skb->data_len -= tail;
3072				}
3073				return skb;
3074			}
3075			rx_pg = &rxr->rx_pg_ring[pg_cons];
3076
3077			/* Don't unmap yet.  If we're unable to allocate a new
3078			 * page, we need to recycle the page and the DMA addr.
3079			 */
3080			mapping_old = dma_unmap_addr(rx_pg, mapping);
3081			if (i == pages - 1)
3082				frag_len -= 4;
3083
3084			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3085			rx_pg->page = NULL;
3086
3087			err = bnx2_alloc_rx_page(bp, rxr,
3088						 BNX2_RX_PG_RING_IDX(pg_prod),
3089						 GFP_ATOMIC);
3090			if (unlikely(err)) {
3091				rxr->rx_pg_cons = pg_cons;
3092				rxr->rx_pg_prod = pg_prod;
3093				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3094							pages - i);
3095				return NULL;
3096			}
3097
3098			dma_unmap_page(&bp->pdev->dev, mapping_old,
3099				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3100
3101			frag_size -= frag_len;
3102			skb->data_len += frag_len;
3103			skb->truesize += PAGE_SIZE;
3104			skb->len += frag_len;
3105
3106			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3107			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3108		}
3109		rxr->rx_pg_prod = pg_prod;
3110		rxr->rx_pg_cons = pg_cons;
3111	}
3112	return skb;
3113}
3114
3115static inline u16
3116bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3117{
3118	u16 cons;
3119
3120	/* Tell compiler that status block fields can change. */
3121	barrier();
3122	cons = *bnapi->hw_rx_cons_ptr;
3123	barrier();
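	/* As in bnx2_get_hw_tx_cons(), skip over the next-page pointer
	 * entry at the end of each ring page.
	 */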
3124	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3125		cons++;
3126	return cons;
3127}
3128
3129static int
3130bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3131{
3132	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3133	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3134	struct l2_fhdr *rx_hdr;
3135	int rx_pkt = 0, pg_ring_used = 0;
3136
3137	if (budget <= 0)
3138		return rx_pkt;
3139
3140	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3141	sw_cons = rxr->rx_cons;
3142	sw_prod = rxr->rx_prod;
3143
3144	/* Memory barrier necessary as speculative reads of the rx
3145	 * buffer can be ahead of the index in the status block
3146	 */
3147	rmb();
3148	while (sw_cons != hw_cons) {
3149		unsigned int len, hdr_len;
3150		u32 status;
3151		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3152		struct sk_buff *skb;
3153		dma_addr_t dma_addr;
3154		u8 *data;
3155		u16 next_ring_idx;
3156
3157		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3158		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3159
3160		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3161		data = rx_buf->data;
3162		rx_buf->data = NULL;
3163
3164		rx_hdr = get_l2_fhdr(data);
3165		prefetch(rx_hdr);
3166
3167		dma_addr = dma_unmap_addr(rx_buf, mapping);
3168
3169		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3170			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3171			PCI_DMA_FROMDEVICE);
3172
3173		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3174		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3175		prefetch(get_l2_fhdr(next_rx_buf->data));
3176
3177		len = rx_hdr->l2_fhdr_pkt_len;
3178		status = rx_hdr->l2_fhdr_status;
3179
3180		hdr_len = 0;
3181		if (status & L2_FHDR_STATUS_SPLIT) {
3182			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3183			pg_ring_used = 1;
3184		} else if (len > bp->rx_jumbo_thresh) {
3185			hdr_len = bp->rx_jumbo_thresh;
3186			pg_ring_used = 1;
3187		}
3188
3189		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3190				       L2_FHDR_ERRORS_PHY_DECODE |
3191				       L2_FHDR_ERRORS_ALIGNMENT |
3192				       L2_FHDR_ERRORS_TOO_SHORT |
3193				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3194
3195			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3196					  sw_ring_prod);
3197			if (pg_ring_used) {
3198				int pages;
3199
3200				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3201
3202				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3203			}
3204			goto next_rx;
3205		}
3206
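		/* The length reported by the chip includes the 4-byte
		 * Ethernet FCS; drop it.
		 */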
3207		len -= 4;
3208
3209		if (len <= bp->rx_copy_thresh) {
3210			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (!skb) {
3212				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3213						  sw_ring_prod);
3214				goto next_rx;
3215			}
3216
			/* Copy from 6 bytes before the frame so the copy
			 * stays word-aligned; the extra bytes are reserved
			 * off below, which also keeps the IP header aligned.
			 */
3218			memcpy(skb->data,
3219			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3220			       len + 6);
3221			skb_reserve(skb, 6);
3222			skb_put(skb, len);
3223
3224			bnx2_reuse_rx_data(bp, rxr, data,
3225				sw_ring_cons, sw_ring_prod);
3226
3227		} else {
3228			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3229					  (sw_ring_cons << 16) | sw_ring_prod);
3230			if (!skb)
3231				goto next_rx;
3232		}
3233		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3234		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3235			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3236
3237		skb->protocol = eth_type_trans(skb, bp->dev);
3238
3239		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
3241		    skb->protocol != htons(ETH_P_8021AD)) {
3242
3243			dev_kfree_skb(skb);
3244			goto next_rx;
3245
3246		}
3247
3248		skb_checksum_none_assert(skb);
3249		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3250			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3251			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3252
3253			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3254					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3255				skb->ip_summed = CHECKSUM_UNNECESSARY;
3256		}
3257		if ((bp->dev->features & NETIF_F_RXHASH) &&
3258		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3259		     L2_FHDR_STATUS_USE_RXHASH))
3260			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3261				     PKT_HASH_TYPE_L3);
3262
3263		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3264		napi_gro_receive(&bnapi->napi, skb);
3265		rx_pkt++;
3266
3267next_rx:
3268		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3269		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3270
		if (rx_pkt == budget)
3272			break;
3273
3274		/* Refresh hw_cons to see if there is new work */
3275		if (sw_cons == hw_cons) {
3276			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3277			rmb();
3278		}
3279	}
3280	rxr->rx_cons = sw_cons;
3281	rxr->rx_prod = sw_prod;
3282
3283	if (pg_ring_used)
3284		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3285
3286	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3287
3288	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3289
3290	mmiowb();
3291
3292	return rx_pkt;
3293
3294}
3295
3296/* MSI ISR - The only difference between this and the INTx ISR
3297 * is that the MSI interrupt is always serviced.
3298 */
3299static irqreturn_t
3300bnx2_msi(int irq, void *dev_instance)
3301{
3302	struct bnx2_napi *bnapi = dev_instance;
3303	struct bnx2 *bp = bnapi->bp;
3304
3305	prefetch(bnapi->status_blk.msi);
3306	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3307		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3308		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3309
3310	/* Return here if interrupt is disabled. */
3311	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3312		return IRQ_HANDLED;
3313
3314	napi_schedule(&bnapi->napi);
3315
3316	return IRQ_HANDLED;
3317}
3318
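/* One-shot MSI: the chip automatically masks itself when the interrupt
 * fires, so no ack/mask write is needed here, unlike bnx2_msi().
 */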
3319static irqreturn_t
3320bnx2_msi_1shot(int irq, void *dev_instance)
3321{
3322	struct bnx2_napi *bnapi = dev_instance;
3323	struct bnx2 *bp = bnapi->bp;
3324
3325	prefetch(bnapi->status_blk.msi);
3326
3327	/* Return here if interrupt is disabled. */
3328	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3329		return IRQ_HANDLED;
3330
3331	napi_schedule(&bnapi->napi);
3332
3333	return IRQ_HANDLED;
3334}
3335
3336static irqreturn_t
3337bnx2_interrupt(int irq, void *dev_instance)
3338{
3339	struct bnx2_napi *bnapi = dev_instance;
3340	struct bnx2 *bp = bnapi->bp;
3341	struct status_block *sblk = bnapi->status_blk.msi;
3342
	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block write that preceded it has
	 * been posted to host memory. Reading a device register flushes
	 * the status block write. When using MSI, the MSI message always
	 * completes after the status block write.
	 */
3349	if ((sblk->status_idx == bnapi->last_status_idx) &&
3350	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3351	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3352		return IRQ_NONE;
3353
3354	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3355		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3356		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3357
3358	/* Read back to deassert IRQ immediately to avoid too many
3359	 * spurious interrupts.
3360	 */
3361	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3362
3363	/* Return here if interrupt is shared and is disabled. */
3364	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3365		return IRQ_HANDLED;
3366
3367	if (napi_schedule_prep(&bnapi->napi)) {
3368		bnapi->last_status_idx = sblk->status_idx;
3369		__napi_schedule(&bnapi->napi);
3370	}
3371
3372	return IRQ_HANDLED;
3373}
3374
3375static inline int
3376bnx2_has_fast_work(struct bnx2_napi *bnapi)
3377{
3378	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3379	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3380
3381	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3382	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3383		return 1;
3384	return 0;
3385}
3386
3387#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3388				 STATUS_ATTN_BITS_TIMER_ABORT)
3389
3390static inline int
3391bnx2_has_work(struct bnx2_napi *bnapi)
3392{
3393	struct status_block *sblk = bnapi->status_blk.msi;
3394
3395	if (bnx2_has_fast_work(bnapi))
3396		return 1;
3397
3398#ifdef BCM_CNIC
3399	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3400		return 1;
3401#endif
3402
3403	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3404	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3405		return 1;
3406
3407	return 0;
3408}
3409
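/* Periodic check: if NAPI work is pending but the status index has not
 * advanced since the last idle check, an MSI was probably missed.
 * Toggling the MSI enable bit regenerates the interrupt, and calling
 * the handler directly kicks NAPI along.
 */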
3410static void
3411bnx2_chk_missed_msi(struct bnx2 *bp)
3412{
3413	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3414	u32 msi_ctrl;
3415
3416	if (bnx2_has_work(bnapi)) {
3417		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3418		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3419			return;
3420
3421		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3422			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3423				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3424			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3425			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3426		}
3427	}
3428
3429	bp->idle_chk_status_idx = bnapi->last_status_idx;
3430}
3431
3432#ifdef BCM_CNIC
3433static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3434{
3435	struct cnic_ops *c_ops;
3436
3437	if (!bnapi->cnic_present)
3438		return;
3439
3440	rcu_read_lock();
3441	c_ops = rcu_dereference(bp->cnic_ops);
3442	if (c_ops)
3443		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3444						      bnapi->status_blk.msi);
3445	rcu_read_unlock();
3446}
3447#endif
3448
3449static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3450{
3451	struct status_block *sblk = bnapi->status_blk.msi;
3452	u32 status_attn_bits = sblk->status_attn_bits;
3453	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3454
3455	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3456	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3457
3458		bnx2_phy_int(bp, bnapi);
3459
3460		/* This is needed to take care of transient status
3461		 * during link changes.
3462		 */
3463		BNX2_WR(bp, BNX2_HC_COMMAND,
3464			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3465		BNX2_RD(bp, BNX2_HC_COMMAND);
3466	}
3467}
3468
3469static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3470			  int work_done, int budget)
3471{
3472	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3473	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3474
3475	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3476		bnx2_tx_int(bp, bnapi, 0);
3477
3478	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3479		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3480
3481	return work_done;
3482}
3483
3484static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3485{
3486	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3487	struct bnx2 *bp = bnapi->bp;
3488	int work_done = 0;
3489	struct status_block_msix *sblk = bnapi->status_blk.msix;
3490
3491	while (1) {
3492		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3493		if (unlikely(work_done >= budget))
3494			break;
3495
3496		bnapi->last_status_idx = sblk->status_idx;
3497		/* status idx must be read before checking for more work. */
3498		rmb();
3499		if (likely(!bnx2_has_fast_work(bnapi))) {
3500
3501			napi_complete(napi);
3502			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3503				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3504				bnapi->last_status_idx);
3505			break;
3506		}
3507	}
3508	return work_done;
3509}
3510
3511static int bnx2_poll(struct napi_struct *napi, int budget)
3512{
3513	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3514	struct bnx2 *bp = bnapi->bp;
3515	int work_done = 0;
3516	struct status_block *sblk = bnapi->status_blk.msi;
3517
3518	while (1) {
3519		bnx2_poll_link(bp, bnapi);
3520
3521		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3522
3523#ifdef BCM_CNIC
3524		bnx2_poll_cnic(bp, bnapi);
3525#endif
3526
3527		/* bnapi->last_status_idx is used below to tell the hw how
3528		 * much work has been processed, so we must read it before
3529		 * checking for more work.
3530		 */
3531		bnapi->last_status_idx = sblk->status_idx;
3532
3533		if (unlikely(work_done >= budget))
3534			break;
3535
3536		rmb();
3537		if (likely(!bnx2_has_work(bnapi))) {
3538			napi_complete(napi);
3539			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3540				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3541					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3542					bnapi->last_status_idx);
3543				break;
3544			}
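			/* Without MSI/MSI-X, ack twice: first with the
			 * interrupt masked, then unmasked, so that INTA is
			 * deasserted before interrupts are re-enabled.
			 */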
3545			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3546				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3547				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3548				bnapi->last_status_idx);
3549
3550			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3551				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3552				bnapi->last_status_idx);
3553			break;
3554		}
3555	}
3556
3557	return work_done;
3558}
3559
3560/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3561 * from set_multicast.
3562 */
3563static void
3564bnx2_set_rx_mode(struct net_device *dev)
3565{
3566	struct bnx2 *bp = netdev_priv(dev);
3567	u32 rx_mode, sort_mode;
3568	struct netdev_hw_addr *ha;
3569	int i;
3570
3571	if (!netif_running(dev))
3572		return;
3573
3574	spin_lock_bh(&bp->phy_lock);
3575
3576	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3577				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3578	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3579	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3580	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3581		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3582	if (dev->flags & IFF_PROMISC) {
3583		/* Promiscuous mode. */
3584		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3585		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3586			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3587	}
3588	else if (dev->flags & IFF_ALLMULTI) {
3589		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3590			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3591				0xffffffff);
		}
3593		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3594	}
3595	else {
3596		/* Accept one or more multicast(s). */
3597		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3598		u32 regidx;
3599		u32 bit;
3600		u32 crc;
3601
		memset(mc_filter, 0, sizeof(mc_filter));
3603
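		/* Hash each multicast address with the little-endian CRC32;
		 * the low 8 bits select one of the 256 filter bits spread
		 * across the 8 hash registers.
		 */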
3604		netdev_for_each_mc_addr(ha, dev) {
3605			crc = ether_crc_le(ETH_ALEN, ha->addr);
3606			bit = crc & 0xff;
3607			regidx = (bit & 0xe0) >> 5;
3608			bit &= 0x1f;
3609			mc_filter[regidx] |= (1 << bit);
3610		}
3611
3612		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3613			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3614				mc_filter[i]);
3615		}
3616
3617		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3618	}
3619
3620	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3621		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3622		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3623			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3624	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3626		i = 0;
3627		netdev_for_each_uc_addr(ha, dev) {
3628			bnx2_set_mac_addr(bp, ha->addr,
3629					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3630			sort_mode |= (1 <<
3631				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3632			i++;
3633		}
3634
3635	}
3636
3637	if (rx_mode != bp->rx_mode) {
3638		bp->rx_mode = rx_mode;
3639		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3640	}
3641
3642	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3643	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3644	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3645
3646	spin_unlock_bh(&bp->phy_lock);
3647}
3648
3649static int
3650check_fw_section(const struct firmware *fw,
3651		 const struct bnx2_fw_file_section *section,
3652		 u32 alignment, bool non_empty)
3653{
3654	u32 offset = be32_to_cpu(section->offset);
3655	u32 len = be32_to_cpu(section->len);
3656
3657	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3658		return -EINVAL;
3659	if ((non_empty && len == 0) || len > fw->size - offset ||
3660	    len & (alignment - 1))
3661		return -EINVAL;
3662	return 0;
3663}
3664
3665static int
3666check_mips_fw_entry(const struct firmware *fw,
3667		    const struct bnx2_mips_fw_file_entry *entry)
3668{
3669	if (check_fw_section(fw, &entry->text, 4, true) ||
3670	    check_fw_section(fw, &entry->data, 4, false) ||
3671	    check_fw_section(fw, &entry->rodata, 4, false))
3672		return -EINVAL;
3673	return 0;
3674}
3675
3676static void bnx2_release_firmware(struct bnx2 *bp)
3677{
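	/* Both images are requested together in
	 * bnx2_request_uncached_firmware(), so rv2p_firmware doubles as
	 * the loaded flag for the pair.
	 */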
3678	if (bp->rv2p_firmware) {
3679		release_firmware(bp->mips_firmware);
3680		release_firmware(bp->rv2p_firmware);
3681		bp->rv2p_firmware = NULL;
3682	}
3683}
3684
3685static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3686{
3687	const char *mips_fw_file, *rv2p_fw_file;
3688	const struct bnx2_mips_fw_file *mips_fw;
3689	const struct bnx2_rv2p_fw_file *rv2p_fw;
3690	int rc;
3691
3692	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3693		mips_fw_file = FW_MIPS_FILE_09;
3694		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3695		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3696			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3697		else
3698			rv2p_fw_file = FW_RV2P_FILE_09;
3699	} else {
3700		mips_fw_file = FW_MIPS_FILE_06;
3701		rv2p_fw_file = FW_RV2P_FILE_06;
3702	}
3703
3704	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3705	if (rc) {
3706		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3707		goto out;
3708	}
3709
3710	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3711	if (rc) {
3712		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3713		goto err_release_mips_firmware;
3714	}
3715	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3716	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3717	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3718	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3719	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3720	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3721	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3722	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3723		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3724		rc = -EINVAL;
3725		goto err_release_firmware;
3726	}
3727	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3728	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3729	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3730		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3731		rc = -EINVAL;
3732		goto err_release_firmware;
3733	}
3734out:
3735	return rc;
3736
3737err_release_firmware:
3738	release_firmware(bp->rv2p_firmware);
3739	bp->rv2p_firmware = NULL;
3740err_release_mips_firmware:
3741	release_firmware(bp->mips_firmware);
3742	goto out;
3743}
3744
3745static int bnx2_request_firmware(struct bnx2 *bp)
3746{
3747	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3748}
3749
3750static u32
3751rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3752{
3753	switch (idx) {
3754	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3755		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3756		rv2p_code |= RV2P_BD_PAGE_SIZE;
3757		break;
3758	}
3759	return rv2p_code;
3760}
3761
3762static int
3763load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3764	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3765{
3766	u32 rv2p_code_len, file_offset;
3767	__be32 *rv2p_code;
3768	int i;
3769	u32 val, cmd, addr;
3770
3771	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3772	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3773
3774	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3775
3776	if (rv2p_proc == RV2P_PROC1) {
3777		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3778		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3779	} else {
3780		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3781		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3782	}
3783
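	/* Each RV2P instruction is 64 bits: write the high and low words,
	 * then latch them into instruction slot i / 8 through the
	 * address/command register.
	 */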
3784	for (i = 0; i < rv2p_code_len; i += 8) {
3785		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3786		rv2p_code++;
3787		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3788		rv2p_code++;
3789
3790		val = (i / 8) | cmd;
3791		BNX2_WR(bp, addr, val);
3792	}
3793
3794	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
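	/* Walk the fixup table: each non-zero entry is the word offset of
	 * an instruction whose low word needs patching (e.g. the BD page
	 * size) before being rewritten to its 64-bit slot (hence loc / 2).
	 */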
3795	for (i = 0; i < 8; i++) {
3796		u32 loc, code;
3797
3798		loc = be32_to_cpu(fw_entry->fixup[i]);
3799		if (loc && ((loc * 4) < rv2p_code_len)) {
3800			code = be32_to_cpu(*(rv2p_code + loc - 1));
3801			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3802			code = be32_to_cpu(*(rv2p_code + loc));
3803			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3804			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3805
3806			val = (loc / 2) | cmd;
3807			BNX2_WR(bp, addr, val);
3808		}
3809	}
3810
3811	/* Reset the processor, un-stall is done later. */
3812	if (rv2p_proc == RV2P_PROC1) {
3813		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3814	}
3815	else {
3816		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3817	}
3818
3819	return 0;
3820}
3821
3822static int
3823load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3824	    const struct bnx2_mips_fw_file_entry *fw_entry)
3825{
3826	u32 addr, len, file_offset;
3827	__be32 *data;
3828	u32 offset;
3829	u32 val;
3830
3831	/* Halt the CPU. */
3832	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3833	val |= cpu_reg->mode_value_halt;
3834	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3835	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3836
3837	/* Load the Text area. */
3838	addr = be32_to_cpu(fw_entry->text.addr);
3839	len = be32_to_cpu(fw_entry->text.len);
3840	file_offset = be32_to_cpu(fw_entry->text.offset);
3841	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3842
3843	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3844	if (len) {
3845		int j;
3846
3847		for (j = 0; j < (len / 4); j++, offset += 4)
3848			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3849	}
3850
3851	/* Load the Data area. */
3852	addr = be32_to_cpu(fw_entry->data.addr);
3853	len = be32_to_cpu(fw_entry->data.len);
3854	file_offset = be32_to_cpu(fw_entry->data.offset);
3855	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3856
3857	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3858	if (len) {
3859		int j;
3860
3861		for (j = 0; j < (len / 4); j++, offset += 4)
3862			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863	}
3864
3865	/* Load the Read-Only area. */
3866	addr = be32_to_cpu(fw_entry->rodata.addr);
3867	len = be32_to_cpu(fw_entry->rodata.len);
3868	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3869	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3870
3871	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3872	if (len) {
3873		int j;
3874
3875		for (j = 0; j < (len / 4); j++, offset += 4)
3876			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877	}
3878
3879	/* Clear the pre-fetch instruction. */
3880	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3881
3882	val = be32_to_cpu(fw_entry->start_addr);
3883	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3884
3885	/* Start the CPU. */
3886	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3887	val &= ~cpu_reg->mode_value_halt;
3888	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3889	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3890
3891	return 0;
3892}
3893
3894static int
3895bnx2_init_cpus(struct bnx2 *bp)
3896{
3897	const struct bnx2_mips_fw_file *mips_fw =
3898		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3899	const struct bnx2_rv2p_fw_file *rv2p_fw =
3900		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3901	int rc;
3902
3903	/* Initialize the RV2P processor. */
3904	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3905	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3906
3907	/* Initialize the RX Processor. */
3908	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3909	if (rc)
3910		goto init_cpu_err;
3911
3912	/* Initialize the TX Processor. */
3913	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3914	if (rc)
3915		goto init_cpu_err;
3916
3917	/* Initialize the TX Patch-up Processor. */
3918	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3919	if (rc)
3920		goto init_cpu_err;
3921
3922	/* Initialize the Completion Processor. */
3923	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3924	if (rc)
3925		goto init_cpu_err;
3926
3927	/* Initialize the Command Processor. */
3928	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3929
3930init_cpu_err:
3931	return rc;
3932}
3933
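/* Prepare the MAC for Wake-on-LAN ahead of a low-power transition.
 * With WoL enabled, copper ports are renegotiated down to 10/100
 * (presumably because gigabit cannot be sustained on the auxiliary
 * supply), the MAC is set to accept magic and ACPI packets, and all
 * multicast hash bits are set so wakeup frames are not filtered.
 * Either way, the firmware is told whether the suspend is with or
 * without WoL.
 */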
3934static void
3935bnx2_setup_wol(struct bnx2 *bp)
3936{
3937	int i;
3938	u32 val, wol_msg;
3939
3940	if (bp->wol) {
3941		u32 advertising;
3942		u8 autoneg;
3943
3944		autoneg = bp->autoneg;
3945		advertising = bp->advertising;
3946
3947		if (bp->phy_port == PORT_TP) {
3948			bp->autoneg = AUTONEG_SPEED;
3949			bp->advertising = ADVERTISED_10baseT_Half |
3950				ADVERTISED_10baseT_Full |
3951				ADVERTISED_100baseT_Half |
3952				ADVERTISED_100baseT_Full |
3953				ADVERTISED_Autoneg;
3954		}
3955
3956		spin_lock_bh(&bp->phy_lock);
3957		bnx2_setup_phy(bp, bp->phy_port);
3958		spin_unlock_bh(&bp->phy_lock);
3959
3960		bp->autoneg = autoneg;
3961		bp->advertising = advertising;
3962
3963		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3964
3965		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3966
3967		/* Enable port mode. */
3968		val &= ~BNX2_EMAC_MODE_PORT;
3969		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3970		       BNX2_EMAC_MODE_ACPI_RCVD |
3971		       BNX2_EMAC_MODE_MPKT;
3972		if (bp->phy_port == PORT_TP) {
3973			val |= BNX2_EMAC_MODE_PORT_MII;
3974		} else {
3975			val |= BNX2_EMAC_MODE_PORT_GMII;
3976			if (bp->line_speed == SPEED_2500)
3977				val |= BNX2_EMAC_MODE_25G_MODE;
3978		}
3979
3980		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3981
3982		/* receive all multicast */
3983		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3984			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3985				0xffffffff);
3986		}
3987		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3988
3989		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3990		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3991		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3992		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3993
3994		/* Need to enable EMAC and RPM for WOL. */
3995		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3996			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3997			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3998			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3999
4000		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4001		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4002		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4003
4004		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}
4008
4009	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4010		u32 val;
4011
4012		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4013		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4014			bnx2_fw_sync(bp, wol_msg, 1, 0);
4015			return;
4016		}
4017		/* Tell firmware not to power down the PHY yet, otherwise
4018		 * the chip will take a long time to respond to MMIO reads.
4019		 */
4020		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4021		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4022			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4023		bnx2_fw_sync(bp, wol_msg, 1, 0);
4024		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4025	}
}
4028
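/* Move the device between power states.  Entering D0 undoes the
 * WoL-specific MAC settings; entering D3hot programs WoL via
 * bnx2_setup_wol(), arms PME, and then drops to D3hot, after which
 * the chip must not be touched through MMIO until it is brought back
 * to D0.
 */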
4029static int
4030bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4031{
4032	switch (state) {
4033	case PCI_D0: {
4034		u32 val;
4035
4036		pci_enable_wake(bp->pdev, PCI_D0, false);
4037		pci_set_power_state(bp->pdev, PCI_D0);
4038
4039		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4040		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4041		val &= ~BNX2_EMAC_MODE_MPKT;
4042		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4043
4044		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4045		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4046		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4047		break;
4048	}
4049	case PCI_D3hot: {
4050		bnx2_setup_wol(bp);
4051		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;
		}
4060		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4061			u32 val;
4062
4063			/* Tell firmware not to power down the PHY yet,
4064			 * otherwise the other port may not respond to
4065			 * MMIO reads.
4066			 */
4067			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4068			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4069			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4070			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4071		}
4072		pci_set_power_state(bp->pdev, PCI_D3hot);
4073
4074		/* No more memory access after this point until
4075		 * device is brought back to D0.
4076		 */
4077		break;
4078	}
4079	default:
4080		return -EINVAL;
4081	}
4082	return 0;
4083}
4084
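/* NVRAM is shared with other agents (presumably the bootcode), so
 * access is serialized through a hardware arbiter: set a request bit,
 * then poll for the matching grant bit.  A minimal sketch of the
 * handshake implemented by the acquire/release pair below:
 *
 *	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
 *	while (!(BNX2_RD(bp, BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2))
 *		udelay(5);
 *
 * The real code bounds the poll with NVRAM_TIMEOUT_COUNT and returns
 * -EBUSY on timeout.
 */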
4085static int
4086bnx2_acquire_nvram_lock(struct bnx2 *bp)
4087{
4088	u32 val;
4089	int j;
4090
4091	/* Request access to the flash interface. */
4092	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4093	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4094		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4095		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4096			break;
4097
4098		udelay(5);
4099	}
4100
4101	if (j >= NVRAM_TIMEOUT_COUNT)
4102		return -EBUSY;
4103
4104	return 0;
4105}
4106
4107static int
4108bnx2_release_nvram_lock(struct bnx2 *bp)
4109{
4110	int j;
4111	u32 val;
4112
4113	/* Relinquish nvram interface. */
4114	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4115
4116	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4117		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4118		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4119			break;
4120
4121		udelay(5);
4122	}
4123
4124	if (j >= NVRAM_TIMEOUT_COUNT)
4125		return -EBUSY;
4126
4127	return 0;
4128}
4129
4130
4131static int
4132bnx2_enable_nvram_write(struct bnx2 *bp)
4133{
4134	u32 val;
4135
4136	val = BNX2_RD(bp, BNX2_MISC_CFG);
4137	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4138
4139	if (bp->flash_info->flags & BNX2_NV_WREN) {
4140		int j;
4141
4142		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4143		BNX2_WR(bp, BNX2_NVM_COMMAND,
4144			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4145
4146		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4147			udelay(5);
4148
4149			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4150			if (val & BNX2_NVM_COMMAND_DONE)
4151				break;
4152		}
4153
4154		if (j >= NVRAM_TIMEOUT_COUNT)
4155			return -EBUSY;
4156	}
4157	return 0;
4158}
4159
4160static void
4161bnx2_disable_nvram_write(struct bnx2 *bp)
4162{
4163	u32 val;
4164
4165	val = BNX2_RD(bp, BNX2_MISC_CFG);
4166	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4167}
4168
4169
4170static void
4171bnx2_enable_nvram_access(struct bnx2 *bp)
4172{
4173	u32 val;
4174
4175	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4176	/* Enable both bits, even on read. */
4177	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4178		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4179}
4180
4181static void
4182bnx2_disable_nvram_access(struct bnx2 *bp)
4183{
4184	u32 val;
4185
4186	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4187	/* Disable both bits, even after read. */
4188	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4189		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4190			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4191}
4192
4193static int
4194bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4195{
4196	u32 cmd;
4197	int j;
4198
4199	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4200		/* Buffered flash, no erase needed */
4201		return 0;
4202
4203	/* Build an erase command */
4204	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4205	      BNX2_NVM_COMMAND_DOIT;
4206
4207	/* Need to clear DONE bit separately. */
4208	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4209
	/* Address of the NVRAM to erase. */
4211	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4212
4213	/* Issue an erase command. */
4214	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4215
4216	/* Wait for completion. */
4217	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4218		u32 val;
4219
4220		udelay(5);
4221
4222		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4223		if (val & BNX2_NVM_COMMAND_DONE)
4224			break;
4225	}
4226
4227	if (j >= NVRAM_TIMEOUT_COUNT)
4228		return -EBUSY;
4229
4230	return 0;
4231}
4232
4233static int
4234bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4235{
4236	u32 cmd;
4237	int j;
4238
4239	/* Build the command word. */
4240	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4241
	/* Translate the offset for buffered flash; not needed on the 5709. */
4243	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4244		offset = ((offset / bp->flash_info->page_size) <<
4245			   bp->flash_info->page_bits) +
4246			  (offset % bp->flash_info->page_size);
4247	}
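	/* For example, on a hypothetical part with 264-byte pages and
	 * 9 page-address bits, offset 300 would become
	 * ((300 / 264) << 9) + (300 % 264), i.e. page 1, byte 36.
	 */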
4248
4249	/* Need to clear DONE bit separately. */
4250	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4251
4252	/* Address of the NVRAM to read from. */
4253	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254
4255	/* Issue a read command. */
4256	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257
4258	/* Wait for completion. */
4259	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4260		u32 val;
4261
4262		udelay(5);
4263
4264		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4265		if (val & BNX2_NVM_COMMAND_DONE) {
4266			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4267			memcpy(ret_val, &v, 4);
4268			break;
4269		}
4270	}
4271	if (j >= NVRAM_TIMEOUT_COUNT)
4272		return -EBUSY;
4273
4274	return 0;
4275}
4276
4277
4278static int
4279bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4280{
4281	u32 cmd;
4282	__be32 val32;
4283	int j;
4284
4285	/* Build the command word. */
4286	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4287
	/* Translate the offset for buffered flash; not needed on the 5709. */
4289	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4290		offset = ((offset / bp->flash_info->page_size) <<
4291			  bp->flash_info->page_bits) +
4292			 (offset % bp->flash_info->page_size);
4293	}
4294
4295	/* Need to clear DONE bit separately. */
4296	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4297
4298	memcpy(&val32, val, 4);
4299
4300	/* Write the data. */
4301	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4302
4303	/* Address of the NVRAM to write to. */
4304	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4305
4306	/* Issue the write command. */
4307	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4308
4309	/* Wait for completion. */
4310	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4311		udelay(5);
4312
4313		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4314			break;
4315	}
4316	if (j >= NVRAM_TIMEOUT_COUNT)
4317		return -EBUSY;
4318
4319	return 0;
4320}
4321
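/* Identify the attached flash/EEPROM part.  The 5709 always uses a
 * fixed entry; on older chips the strapping value latched in NVM_CFG1
 * is matched against flash_table[], and if the interface has not been
 * reconfigured yet, the matching entry's configuration registers are
 * programmed into the chip.
 */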
4322static int
4323bnx2_init_nvram(struct bnx2 *bp)
4324{
4325	u32 val;
4326	int j, entry_count, rc = 0;
4327	const struct flash_spec *flash;
4328
4329	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4330		bp->flash_info = &flash_5709;
4331		goto get_flash_size;
4332	}
4333
4334	/* Determine the selected interface. */
4335	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4336
4337	entry_count = ARRAY_SIZE(flash_table);
4338
	if (val & 0x40000000) {
		/* Flash interface has been reconfigured */
4342		for (j = 0, flash = &flash_table[0]; j < entry_count;
4343		     j++, flash++) {
4344			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4345			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4346				bp->flash_info = flash;
4347				break;
4348			}
4349		}
	} else {
		u32 mask;
		/* Not yet reconfigured */
4354
4355		if (val & (1 << 23))
4356			mask = FLASH_BACKUP_STRAP_MASK;
4357		else
4358			mask = FLASH_STRAP_MASK;
4359
4360		for (j = 0, flash = &flash_table[0]; j < entry_count;
4361			j++, flash++) {
4362
4363			if ((val & mask) == (flash->strapping & mask)) {
4364				bp->flash_info = flash;
4365
4366				/* Request access to the flash interface. */
4367				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4368					return rc;
4369
4370				/* Enable access to flash interface */
4371				bnx2_enable_nvram_access(bp);
4372
4373				/* Reconfigure the flash interface */
4374				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4375				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4376				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4377				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4378
4379				/* Disable access to flash interface */
4380				bnx2_disable_nvram_access(bp);
4381				bnx2_release_nvram_lock(bp);
4382
4383				break;
4384			}
4385		}
4386	} /* if (val & 0x40000000) */
4387
4388	if (j == entry_count) {
4389		bp->flash_info = NULL;
4390		pr_alert("Unknown flash/EEPROM type\n");
4391		return -ENODEV;
4392	}
4393
4394get_flash_size:
4395	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4396	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4397	if (val)
4398		bp->flash_size = val;
4399	else
4400		bp->flash_size = bp->flash_info->total_size;
4401
4402	return rc;
4403}
4404
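/* Read an arbitrary byte range from NVRAM.  The interface transfers
 * whole dwords only, so a misaligned head and a short tail are each
 * read through a 4-byte bounce buffer and copied out partially, with
 * FIRST/LAST command flags bracketing the access sequence.
 */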
4405static int
4406bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4407		int buf_size)
4408{
4409	int rc = 0;
4410	u32 cmd_flags, offset32, len32, extra;
4411
4412	if (buf_size == 0)
4413		return 0;
4414
4415	/* Request access to the flash interface. */
4416	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4417		return rc;
4418
4419	/* Enable access to flash interface */
4420	bnx2_enable_nvram_access(bp);
4421
4422	len32 = buf_size;
4423	offset32 = offset;
4424	extra = 0;
4425
4426	cmd_flags = 0;
4427
4428	if (offset32 & 3) {
4429		u8 buf[4];
4430		u32 pre_len;
4431
4432		offset32 &= ~3;
4433		pre_len = 4 - (offset & 3);
4434
4435		if (pre_len >= len32) {
4436			pre_len = len32;
4437			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4438				    BNX2_NVM_COMMAND_LAST;
		} else {
4441			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4442		}
4443
4444		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4445
4446		if (rc)
4447			return rc;
4448
4449		memcpy(ret_buf, buf + (offset & 3), pre_len);
4450
4451		offset32 += 4;
4452		ret_buf += pre_len;
4453		len32 -= pre_len;
4454	}
4455	if (len32 & 3) {
4456		extra = 4 - (len32 & 3);
4457		len32 = (len32 + 4) & ~3;
4458	}
4459
4460	if (len32 == 4) {
4461		u8 buf[4];
4462
4463		if (cmd_flags)
4464			cmd_flags = BNX2_NVM_COMMAND_LAST;
4465		else
4466			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4467				    BNX2_NVM_COMMAND_LAST;
4468
4469		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4470
4471		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
4474		u8 buf[4];
4475
4476		/* Read the first word. */
4477		if (cmd_flags)
4478			cmd_flags = 0;
4479		else
4480			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4481
4482		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4483
4484		/* Advance to the next dword. */
4485		offset32 += 4;
4486		ret_buf += 4;
4487		len32 -= 4;
4488
4489		while (len32 > 4 && rc == 0) {
4490			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4491
4492			/* Advance to the next dword. */
4493			offset32 += 4;
4494			ret_buf += 4;
4495			len32 -= 4;
4496		}
4497
4498		if (rc)
4499			return rc;
4500
4501		cmd_flags = BNX2_NVM_COMMAND_LAST;
4502		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4503
4504		memcpy(ret_buf, buf, 4 - extra);
4505	}
4506
4507	/* Disable access to flash interface */
4508	bnx2_disable_nvram_access(bp);
4509
4510	bnx2_release_nvram_lock(bp);
4511
4512	return rc;
4513}
4514
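/* NVRAM writes must be dword-sized and dword-aligned, so unaligned
 * head and tail bytes are handled read-modify-write: the surrounding
 * dwords are read first, merged with the caller's data in a bounce
 * buffer, and written back.  On non-buffered flash, each affected
 * page is also read out in full, erased, and rewritten, since the
 * erase granularity is a whole page.
 */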
4515static int
4516bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4517		int buf_size)
4518{
4519	u32 written, offset32, len32;
4520	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4521	int rc = 0;
4522	int align_start, align_end;
4523
4524	buf = data_buf;
4525	offset32 = offset;
4526	len32 = buf_size;
4527	align_start = align_end = 0;
4528
4529	if ((align_start = (offset32 & 3))) {
4530		offset32 &= ~3;
4531		len32 += align_start;
4532		if (len32 < 4)
4533			len32 = 4;
4534		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4535			return rc;
4536	}
4537
4538	if (len32 & 3) {
4539		align_end = 4 - (len32 & 3);
4540		len32 += align_end;
4541		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4542			return rc;
4543	}
4544
4545	if (align_start || align_end) {
4546		align_buf = kmalloc(len32, GFP_KERNEL);
4547		if (align_buf == NULL)
4548			return -ENOMEM;
4549		if (align_start) {
4550			memcpy(align_buf, start, 4);
4551		}
4552		if (align_end) {
4553			memcpy(align_buf + len32 - 4, end, 4);
4554		}
4555		memcpy(align_buf + align_start, data_buf, buf_size);
4556		buf = align_buf;
4557	}
4558
4559	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4560		flash_buffer = kmalloc(264, GFP_KERNEL);
4561		if (flash_buffer == NULL) {
4562			rc = -ENOMEM;
4563			goto nvram_write_end;
4564		}
4565	}
4566
4567	written = 0;
4568	while ((written < len32) && (rc == 0)) {
4569		u32 page_start, page_end, data_start, data_end;
4570		u32 addr, cmd_flags;
4571		int i;
4572
		/* Find the page_start addr */
4574		page_start = offset32 + written;
4575		page_start -= (page_start % bp->flash_info->page_size);
4576		/* Find the page_end addr */
4577		page_end = page_start + bp->flash_info->page_size;
4578		/* Find the data_start addr */
4579		data_start = (written == 0) ? offset32 : page_start;
4580		/* Find the data_end addr */
4581		data_end = (page_end > offset32 + len32) ?
4582			(offset32 + len32) : page_end;
4583
4584		/* Request access to the flash interface. */
4585		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4586			goto nvram_write_end;
4587
4588		/* Enable access to flash interface */
4589		bnx2_enable_nvram_access(bp);
4590
4591		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4592		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4593			int j;
4594
			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
4597			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4598				if (j == (bp->flash_info->page_size - 4)) {
4599					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4600				}
4601				rc = bnx2_nvram_read_dword(bp,
4602					page_start + j,
4603					&flash_buffer[j],
4604					cmd_flags);
4605
4606				if (rc)
4607					goto nvram_write_end;
4608
4609				cmd_flags = 0;
4610			}
4611		}
4612
4613		/* Enable writes to flash interface (unlock write-protect) */
4614		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4615			goto nvram_write_end;
4616
4617		/* Loop to write back the buffer data from page_start to
4618		 * data_start */
4619		i = 0;
4620		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4621			/* Erase the page */
4622			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4623				goto nvram_write_end;
4624
4625			/* Re-enable the write again for the actual write */
4626			bnx2_enable_nvram_write(bp);
4627
4628			for (addr = page_start; addr < data_start;
4629				addr += 4, i += 4) {
4630
4631				rc = bnx2_nvram_write_dword(bp, addr,
4632					&flash_buffer[i], cmd_flags);
4633
4634				if (rc != 0)
4635					goto nvram_write_end;
4636
4637				cmd_flags = 0;
4638			}
4639		}
4640
4641		/* Loop to write the new data from data_start to data_end */
4642		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4643			if ((addr == page_end - 4) ||
4644				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4645				 (addr == data_end - 4))) {
4646
4647				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4648			}
4649			rc = bnx2_nvram_write_dword(bp, addr, buf,
4650				cmd_flags);
4651
4652			if (rc != 0)
4653				goto nvram_write_end;
4654
4655			cmd_flags = 0;
4656			buf += 4;
4657		}
4658
4659		/* Loop to write back the buffer data from data_end
4660		 * to page_end */
4661		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4662			for (addr = data_end; addr < page_end;
4663				addr += 4, i += 4) {
4664
				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
4668				rc = bnx2_nvram_write_dword(bp, addr,
4669					&flash_buffer[i], cmd_flags);
4670
4671				if (rc != 0)
4672					goto nvram_write_end;
4673
4674				cmd_flags = 0;
4675			}
4676		}
4677
4678		/* Disable writes to flash interface (lock write-protect) */
4679		bnx2_disable_nvram_write(bp);
4680
4681		/* Disable access to flash interface */
4682		bnx2_disable_nvram_access(bp);
4683		bnx2_release_nvram_lock(bp);
4684
4685		/* Increment written */
4686		written += data_end - data_start;
4687	}
4688
4689nvram_write_end:
4690	kfree(flash_buffer);
4691	kfree(align_buf);
4692	return rc;
4693}
4694
4695static void
4696bnx2_init_fw_cap(struct bnx2 *bp)
4697{
4698	u32 val, sig = 0;
4699
4700	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4701	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4702
4703	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4704		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4705
4706	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4707	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4708		return;
4709
4710	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4711		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4712		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4713	}
4714
4715	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4716	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4717		u32 link;
4718
4719		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4720
4721		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4722		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4723			bp->phy_port = PORT_FIBRE;
4724		else
4725			bp->phy_port = PORT_TP;
4726
4727		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4728		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4729	}
4730
4731	if (netif_running(bp->dev) && sig)
4732		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4733}
4734
4735static void
4736bnx2_setup_msix_tbl(struct bnx2 *bp)
4737{
4738	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4739
4740	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4741	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4742}
4743
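/* Soft-reset the chip.  The DMA engines are quiesced first, the
 * firmware is asked for permission to reset, and a driver reset
 * signature is deposited so the firmware can tell a soft reset from a
 * power-on.  The 5709 is reset through the MISC_COMMAND register;
 * older chips pulse the core-reset bit in PCICFG_MISC_CONFIG and poll
 * until it self-clears.
 */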
4744static int
4745bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4746{
4747	u32 val;
4748	int i, rc = 0;
4749	u8 old_port;
4750
4751	/* Wait for the current PCI transaction to complete before
4752	 * issuing a reset. */
4753	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4754	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4755		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4756			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4757			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4758			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4759			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4760		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4761		udelay(5);
4762	} else {  /* 5709 */
4763		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4764		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4765		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4766		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4767
4768		for (i = 0; i < 100; i++) {
4769			msleep(1);
4770			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4771			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4772				break;
4773		}
4774	}
4775
4776	/* Wait for the firmware to tell us it is ok to issue a reset. */
4777	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4778
4779	/* Deposit a driver reset signature so the firmware knows that
4780	 * this is a soft reset. */
4781	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4782		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4783
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue the reset. */
4786	val = BNX2_RD(bp, BNX2_MISC_ID);
4787
4788	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4789		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4790		BNX2_RD(bp, BNX2_MISC_COMMAND);
4791		udelay(5);
4792
4793		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4794		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4795
4796		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4797
4798	} else {
4799		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4800		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4801		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4802
4803		/* Chip reset. */
4804		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4805
4806		/* Reading back any register after chip reset will hang the
4807		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4808		 * of margin for write posting.
4809		 */
4810		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4811		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4812			msleep(20);
4813
		/* Reset takes approximately 30 usec */
4815		for (i = 0; i < 10; i++) {
4816			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4817			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4818				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4819				break;
4820			udelay(10);
4821		}
4822
4823		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4824			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4825			pr_err("Chip reset did not complete\n");
4826			return -EBUSY;
4827		}
4828	}
4829
4830	/* Make sure byte swapping is properly configured. */
4831	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4832	if (val != 0x01020304) {
4833		pr_err("Chip not in correct endian mode\n");
4834		return -ENODEV;
4835	}
4836
4837	/* Wait for the firmware to finish its initialization. */
4838	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4839	if (rc)
4840		return rc;
4841
4842	spin_lock_bh(&bp->phy_lock);
4843	old_port = bp->phy_port;
4844	bnx2_init_fw_cap(bp);
4845	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4846	    old_port != bp->phy_port)
4847		bnx2_set_default_remote_link(bp);
4848	spin_unlock_bh(&bp->phy_lock);
4849
4850	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * value of this register is 0x0000000e. */
4853		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4854
4855		/* Remove bad rbuf memory from the free pool. */
4856		rc = bnx2_alloc_bad_rbuf(bp);
4857	}
4858
4859	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4860		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4862		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4863			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4864	}
4865
4866	return rc;
4867}
4868
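/* Bring the freshly reset chip to an operational state: program DMA
 * byte/word swapping for the host endianness, load the RV2P and MIPS
 * firmware, program the MAC address, backoff seed and MTU, point the
 * host coalescing block at the status/statistics DMA buffers, and set
 * the coalescing parameters for every interrupt vector.
 */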
4869static int
4870bnx2_init_chip(struct bnx2 *bp)
4871{
4872	u32 val, mtu;
4873	int rc, i;
4874
4875	/* Make sure the interrupt is not active. */
4876	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4877
4878	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4879	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4880#ifdef __BIG_ENDIAN
4881	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4882#endif
4883	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4884	      DMA_READ_CHANS << 12 |
4885	      DMA_WRITE_CHANS << 16;
4886
4887	val |= (0x2 << 20) | (1 << 11);
4888
4889	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4890		val |= (1 << 23);
4891
4892	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4893	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4894	    !(bp->flags & BNX2_FLAG_PCIX))
4895		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4896
4897	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4898
4899	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4900		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4901		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4902		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4903	}
4904
4905	if (bp->flags & BNX2_FLAG_PCIX) {
4906		u16 val16;
4907
4908		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4909				     &val16);
4910		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4911				      val16 & ~PCI_X_CMD_ERO);
4912	}
4913
4914	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4915		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4916		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4917		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4918
4919	/* Initialize context mapping and zero out the quick contexts.  The
4920	 * context block must have already been enabled. */
4921	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4922		rc = bnx2_init_5709_context(bp);
4923		if (rc)
4924			return rc;
4925	} else
4926		bnx2_init_context(bp);
4927
4928	if ((rc = bnx2_init_cpus(bp)) != 0)
4929		return rc;
4930
4931	bnx2_init_nvram(bp);
4932
4933	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4934
4935	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4936	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4937	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4938	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4939		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4940		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4941			val |= BNX2_MQ_CONFIG_HALT_DIS;
4942	}
4943
4944	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4945
4946	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4947	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4948	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4949
4950	val = (BNX2_PAGE_BITS - 8) << 24;
4951	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4952
4953	/* Configure page size. */
4954	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4955	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4956	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4957	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4958
4959	val = bp->mac_addr[0] +
4960	      (bp->mac_addr[1] << 8) +
4961	      (bp->mac_addr[2] << 16) +
4962	      bp->mac_addr[3] +
4963	      (bp->mac_addr[4] << 8) +
4964	      (bp->mac_addr[5] << 16);
4965	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4966
4967	/* Program the MTU.  Also include 4 bytes for CRC32. */
4968	mtu = bp->dev->mtu;
4969	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4970	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4971		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4972	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4973
4974	if (mtu < 1500)
4975		mtu = 1500;
4976
4977	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4978	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4979	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4980
4981	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4982	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4983		bp->bnx2_napi[i].last_status_idx = 0;
4984
4985	bp->idle_chk_status_idx = 0xffff;
4986
4987	/* Set up how to generate a link change interrupt. */
4988	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4989
4990	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4991		(u64) bp->status_blk_mapping & 0xffffffff);
4992	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4993
4994	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4995		(u64) bp->stats_blk_mapping & 0xffffffff);
4996	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4997		(u64) bp->stats_blk_mapping >> 32);
4998
4999	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5000		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5001
5002	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5003		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5004
5005	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5006		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5007
5008	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5009
5010	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5011
5012	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5013		(bp->com_ticks_int << 16) | bp->com_ticks);
5014
5015	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5016		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5017
5018	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5019		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5020	else
5021		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5022	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5023
5024	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5025		val = BNX2_HC_CONFIG_COLLECT_STATS;
5026	else {
5027		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5028		      BNX2_HC_CONFIG_COLLECT_STATS;
5029	}
5030
5031	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5032		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5033			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5034
5035		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5036	}
5037
5038	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5039		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5040
5041	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5042
5043	if (bp->rx_ticks < 25)
5044		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5045	else
5046		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5047
5048	for (i = 1; i < bp->irq_nvecs; i++) {
5049		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5050			   BNX2_HC_SB_CONFIG_1;
5051
5052		BNX2_WR(bp, base,
5053			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5054			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5055			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5056
5057		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5058			(bp->tx_quick_cons_trip_int << 16) |
5059			 bp->tx_quick_cons_trip);
5060
5061		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5062			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5063
5064		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5065			(bp->rx_quick_cons_trip_int << 16) |
5066			bp->rx_quick_cons_trip);
5067
5068		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5069			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5070	}
5071
5072	/* Clear internal stats counters. */
5073	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5074
5075	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5076
5077	/* Initialize the receive filter. */
5078	bnx2_set_rx_mode(bp->dev);
5079
5080	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5081		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5082		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5083		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5084	}
5085	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5086			  1, 0);
5087
5088	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5089	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5090
5091	udelay(20);
5092
5093	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5094
5095	return rc;
5096}
5097
5098static void
5099bnx2_clear_ring_states(struct bnx2 *bp)
5100{
5101	struct bnx2_napi *bnapi;
5102	struct bnx2_tx_ring_info *txr;
5103	struct bnx2_rx_ring_info *rxr;
5104	int i;
5105
5106	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5107		bnapi = &bp->bnx2_napi[i];
5108		txr = &bnapi->tx_ring;
5109		rxr = &bnapi->rx_ring;
5110
5111		txr->tx_cons = 0;
5112		txr->hw_tx_cons = 0;
5113		rxr->rx_prod_bseq = 0;
5114		rxr->rx_prod = 0;
5115		rxr->rx_cons = 0;
5116		rxr->rx_pg_prod = 0;
5117		rxr->rx_pg_cons = 0;
5118	}
5119}
5120
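/* Program the per-ring TX context: ring type and size, command type,
 * and the host address of the TX descriptor ring.  The 5709 (which
 * keeps its context memory in host RAM) uses a separate "XI" set of
 * context offsets.
 */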
5121static void
5122bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5123{
5124	u32 val, offset0, offset1, offset2, offset3;
5125	u32 cid_addr = GET_CID_ADDR(cid);
5126
5127	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5128		offset0 = BNX2_L2CTX_TYPE_XI;
5129		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5130		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5131		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5132	} else {
5133		offset0 = BNX2_L2CTX_TYPE;
5134		offset1 = BNX2_L2CTX_CMD_TYPE;
5135		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5136		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5137	}
5138	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5139	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5140
5141	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5142	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5143
5144	val = (u64) txr->tx_desc_mapping >> 32;
5145	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5146
5147	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5148	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5149}
5150
5151static void
5152bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5153{
5154	struct bnx2_tx_bd *txbd;
5155	u32 cid = TX_CID;
5156	struct bnx2_napi *bnapi;
5157	struct bnx2_tx_ring_info *txr;
5158
5159	bnapi = &bp->bnx2_napi[ring_num];
5160	txr = &bnapi->tx_ring;
5161
5162	if (ring_num == 0)
5163		cid = TX_CID;
5164	else
5165		cid = TX_TSS_CID + ring_num - 1;
5166
5167	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5168
5169	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5170
5171	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5172	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5173
5174	txr->tx_prod = 0;
5175	txr->tx_prod_bseq = 0;
5176
5177	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5178	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5179
5180	bnx2_init_tx_context(bp, cid, txr);
5181}
5182
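/* Initialize one chained set of RX buffer descriptor pages.  Every
 * descriptor gets the fixed buffer size and START/END flags; the last
 * descriptor of each page is used as a link that points at the next
 * page, and the last page links back to the first to close the ring.
 */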
5183static void
5184bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5185		     u32 buf_size, int num_rings)
5186{
5187	int i;
5188	struct bnx2_rx_bd *rxbd;
5189
5190	for (i = 0; i < num_rings; i++) {
5191		int j;
5192
5193		rxbd = &rx_ring[i][0];
5194		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5195			rxbd->rx_bd_len = buf_size;
5196			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5197		}
5198		if (i == (num_rings - 1))
5199			j = 0;
5200		else
5201			j = i + 1;
5202		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5203		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5204	}
5205}
5206
5207static void
5208bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5209{
5210	int i;
5211	u16 prod, ring_prod;
5212	u32 cid, rx_cid_addr, val;
5213	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5214	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5215
5216	if (ring_num == 0)
5217		cid = RX_CID;
5218	else
5219		cid = RX_RSS_CID + ring_num - 1;
5220
5221	rx_cid_addr = GET_CID_ADDR(cid);
5222
5223	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5224			     bp->rx_buf_use_size, bp->rx_max_ring);
5225
5226	bnx2_init_rx_context(bp, cid);
5227
5228	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5229		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5230		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5231	}
5232
5233	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5234	if (bp->rx_pg_ring_size) {
5235		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5236				     rxr->rx_pg_desc_mapping,
5237				     PAGE_SIZE, bp->rx_max_pg_ring);
5238		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5239		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5240		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5241		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5242
5243		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5244		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5245
5246		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5247		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5248
5249		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5250			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5251	}
5252
5253	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5254	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5255
5256	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5257	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5258
5259	ring_prod = prod = rxr->rx_pg_prod;
5260	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5261		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5262			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5263				    ring_num, i, bp->rx_pg_ring_size);
5264			break;
5265		}
5266		prod = BNX2_NEXT_RX_BD(prod);
5267		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5268	}
5269	rxr->rx_pg_prod = prod;
5270
5271	ring_prod = prod = rxr->rx_prod;
5272	for (i = 0; i < bp->rx_ring_size; i++) {
5273		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5274			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5275				    ring_num, i, bp->rx_ring_size);
5276			break;
5277		}
5278		prod = BNX2_NEXT_RX_BD(prod);
5279		ring_prod = BNX2_RX_RING_IDX(prod);
5280	}
5281	rxr->rx_prod = prod;
5282
5283	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5284	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5285	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5286
5287	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5288	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5289
5290	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5291}
5292
5293static void
5294bnx2_init_all_rings(struct bnx2 *bp)
5295{
5296	int i;
5297	u32 val;
5298
5299	bnx2_clear_ring_states(bp);
5300
5301	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5302	for (i = 0; i < bp->num_tx_rings; i++)
5303		bnx2_init_tx_ring(bp, i);
5304
5305	if (bp->num_tx_rings > 1)
5306		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5307			(TX_TSS_CID << 7));
5308
5309	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5310	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5311
5312	for (i = 0; i < bp->num_rx_rings; i++)
5313		bnx2_init_rx_ring(bp, i);
5314
5315	if (bp->num_rx_rings > 1) {
5316		u32 tbl_32 = 0;
5317
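		/* Build the RSS indirection table: entries are 4 bits
		 * wide and packed eight to a 32-bit word, filled
		 * round-robin across the (num_rx_rings - 1) RSS rings.
		 */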
5318		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5319			int shift = (i % 8) << 2;
5320
5321			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5322			if ((i % 8) == 7) {
5323				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5324				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5325					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5326					BNX2_RLUP_RSS_COMMAND_WRITE |
5327					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5328				tbl_32 = 0;
5329			}
5330		}
5331
5332		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5333		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5334
5335		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5336
5337	}
5338}
5339
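/* Return the number of ring pages needed to hold @ring_size
 * descriptors, rounded up to the next power of two.  The rounding
 * works by scanning down from @max_size, which is assumed to be a
 * power of two.
 */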
5340static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5341{
5342	u32 max, num_rings = 1;
5343
5344	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5345		ring_size -= BNX2_MAX_RX_DESC_CNT;
5346		num_rings++;
5347	}
5348	/* round to next power of 2 */
5349	max = max_size;
5350	while ((max & num_rings) == 0)
5351		max >>= 1;
5352
5353	if (num_rings != max)
5354		max <<= 1;
5355
5356	return max;
5357}
5358
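/* Size the RX rings for the given ring size and current MTU.  When a
 * receive buffer would no longer fit in a single page (and jumbo mode
 * is not broken on this chip), split mode is used instead: the first
 * BNX2_RX_COPY_THRESH bytes of a frame land in the normal ring and
 * the remainder is scattered into the page ring sized here.
 */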
5359static void
5360bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5361{
5362	u32 rx_size, rx_space, jumbo_size;
5363
5364	/* 8 for CRC and VLAN */
5365	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5366
5367	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5368		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5369
5370	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5371	bp->rx_pg_ring_size = 0;
5372	bp->rx_max_pg_ring = 0;
5373	bp->rx_max_pg_ring_idx = 0;
5374	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5375		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5376
5377		jumbo_size = size * pages;
5378		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5379			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5380
5381		bp->rx_pg_ring_size = jumbo_size;
5382		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5383							BNX2_MAX_RX_PG_RINGS);
5384		bp->rx_max_pg_ring_idx =
5385			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5386		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5387		bp->rx_copy_thresh = 0;
5388	}
5389
5390	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5392	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5393		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5394	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5395	bp->rx_ring_size = size;
5396	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5397	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5398}
5399
5400static void
5401bnx2_free_tx_skbs(struct bnx2 *bp)
5402{
5403	int i;
5404
5405	for (i = 0; i < bp->num_tx_rings; i++) {
5406		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5407		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5408		int j;
5409
5410		if (txr->tx_buf_ring == NULL)
5411			continue;
5412
5413		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5414			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5415			struct sk_buff *skb = tx_buf->skb;
5416			int k, last;
5417
5418			if (skb == NULL) {
5419				j = BNX2_NEXT_TX_BD(j);
5420				continue;
5421			}
5422
5423			dma_unmap_single(&bp->pdev->dev,
5424					 dma_unmap_addr(tx_buf, mapping),
5425					 skb_headlen(skb),
5426					 PCI_DMA_TODEVICE);
5427
5428			tx_buf->skb = NULL;
5429
5430			last = tx_buf->nr_frags;
5431			j = BNX2_NEXT_TX_BD(j);
5432			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5433				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5434				dma_unmap_page(&bp->pdev->dev,
5435					dma_unmap_addr(tx_buf, mapping),
5436					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5437					PCI_DMA_TODEVICE);
5438			}
5439			dev_kfree_skb(skb);
5440		}
5441		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5442	}
5443}
5444
5445static void
5446bnx2_free_rx_skbs(struct bnx2 *bp)
5447{
5448	int i;
5449
5450	for (i = 0; i < bp->num_rx_rings; i++) {
5451		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5452		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5453		int j;
5454
5455		if (rxr->rx_buf_ring == NULL)
5456			return;
5457
5458		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5459			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5460			u8 *data = rx_buf->data;
5461
5462			if (data == NULL)
5463				continue;
5464
5465			dma_unmap_single(&bp->pdev->dev,
5466					 dma_unmap_addr(rx_buf, mapping),
5467					 bp->rx_buf_use_size,
5468					 PCI_DMA_FROMDEVICE);
5469
5470			rx_buf->data = NULL;
5471
5472			kfree(data);
5473		}
5474		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5475			bnx2_free_rx_page(bp, rxr, j);
5476	}
5477}
5478
5479static void
5480bnx2_free_skbs(struct bnx2 *bp)
5481{
5482	bnx2_free_tx_skbs(bp);
5483	bnx2_free_rx_skbs(bp);
5484}
5485
5486static int
5487bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5488{
5489	int rc;
5490
5491	rc = bnx2_reset_chip(bp, reset_code);
5492	bnx2_free_skbs(bp);
5493	if (rc)
5494		return rc;
5495
5496	if ((rc = bnx2_init_chip(bp)) != 0)
5497		return rc;
5498
5499	bnx2_init_all_rings(bp);
5500	return 0;
5501}
5502
5503static int
5504bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5505{
5506	int rc;
5507
5508	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5509		return rc;
5510
5511	spin_lock_bh(&bp->phy_lock);
5512	bnx2_init_phy(bp, reset_phy);
5513	bnx2_set_link(bp);
5514	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5515		bnx2_remote_phy_event(bp);
5516	spin_unlock_bh(&bp->phy_lock);
5517	return 0;
5518}
5519
5520static int
5521bnx2_shutdown_chip(struct bnx2 *bp)
5522{
5523	u32 reset_code;
5524
5525	if (bp->flags & BNX2_FLAG_NO_WOL)
5526		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5527	else if (bp->wol)
5528		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5529	else
5530		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5531
5532	return bnx2_reset_chip(bp, reset_code);
5533}
5534
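/* Offline register test.  Each table entry is probed by writing
 * all-zeros and then all-ones: bits in rw_mask must take the written
 * value, bits in ro_mask must keep their original value, and the
 * register is restored afterwards.  Entries flagged BNX2_FL_NOT_5709
 * are skipped on the 5709.
 */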
5535static int
5536bnx2_test_registers(struct bnx2 *bp)
5537{
5538	int ret;
5539	int i, is_5709;
5540	static const struct {
5541		u16   offset;
5542		u16   flags;
5543#define BNX2_FL_NOT_5709	1
5544		u32   rw_mask;
5545		u32   ro_mask;
5546	} reg_tbl[] = {
5547		{ 0x006c, 0, 0x00000000, 0x0000003f },
5548		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5549		{ 0x0094, 0, 0x00000000, 0x00000000 },
5550
5551		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5552		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5553		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5554		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5555		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5556		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5557		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5558		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5559		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5560
5561		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5562		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5564		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5565		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5566		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5567
5568		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5569		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5570		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5571
5572		{ 0x1000, 0, 0x00000000, 0x00000001 },
5573		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5574
5575		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5576		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5577		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5578		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5579		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5580		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5581		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5582		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5583		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5584		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5585
5586		{ 0x1800, 0, 0x00000000, 0x00000001 },
5587		{ 0x1804, 0, 0x00000000, 0x00000003 },
5588
5589		{ 0x2800, 0, 0x00000000, 0x00000001 },
5590		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5591		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5592		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5593		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5594		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5595		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5596		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5597		{ 0x2840, 0, 0x00000000, 0xffffffff },
5598		{ 0x2844, 0, 0x00000000, 0xffffffff },
5599		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5600		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5601
5602		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5603		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5604
5605		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5606		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5607		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5608		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5609		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5610		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5611		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5612		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5613		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5614
5615		{ 0x5004, 0, 0x00000000, 0x0000007f },
5616		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5617
5618		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5619		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5620		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5621		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5622		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5623		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5624		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5625		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5626		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5627
5628		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5629		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5630		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5631		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5632		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5633		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5634		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5635		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5636		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5637		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5638		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5639		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5640		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5641		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5642		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5643		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5644		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5645		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5646		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5647		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5648		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5649		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5650		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5651
5652		{ 0xffff, 0, 0x00000000, 0x00000000 },
5653	};
5654
5655	ret = 0;
5656	is_5709 = 0;
5657	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5658		is_5709 = 1;
5659
5660	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5661		u32 offset, rw_mask, ro_mask, save_val, val;
5662		u16 flags = reg_tbl[i].flags;
5663
5664		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5665			continue;
5666
5667		offset = (u32) reg_tbl[i].offset;
5668		rw_mask = reg_tbl[i].rw_mask;
5669		ro_mask = reg_tbl[i].ro_mask;
5670
5671		save_val = readl(bp->regview + offset);
5672
5673		writel(0, bp->regview + offset);
5674
5675		val = readl(bp->regview + offset);
5676		if ((val & rw_mask) != 0) {
5677			goto reg_test_err;
5678		}
5679
5680		if ((val & ro_mask) != (save_val & ro_mask)) {
5681			goto reg_test_err;
5682		}
5683
5684		writel(0xffffffff, bp->regview + offset);
5685
5686		val = readl(bp->regview + offset);
5687		if ((val & rw_mask) != rw_mask) {
5688			goto reg_test_err;
5689		}
5690
5691		if ((val & ro_mask) != (save_val & ro_mask)) {
5692			goto reg_test_err;
5693		}
5694
5695		writel(save_val, bp->regview + offset);
5696		continue;
5697
5698reg_test_err:
5699		writel(save_val, bp->regview + offset);
5700		ret = -ENODEV;
5701		break;
5702	}
5703	return ret;
5704}
5705
5706static int
5707bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5708{
5709	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5710		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5711	int i;
5712
5713	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5714		u32 offset;
5715
5716		for (offset = 0; offset < size; offset += 4) {
5717
5718			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5719
5720			if (bnx2_reg_rd_ind(bp, start + offset) !=
5721				test_pattern[i]) {
5722				return -ENODEV;
5723			}
5724		}
5725	}
5726	return 0;
5727}
5728
5729static int
5730bnx2_test_memory(struct bnx2 *bp)
5731{
5732	int ret = 0;
5733	int i;
5734	static struct mem_entry {
5735		u32   offset;
5736		u32   len;
5737	} mem_tbl_5706[] = {
5738		{ 0x60000,  0x4000 },
5739		{ 0xa0000,  0x3000 },
5740		{ 0xe0000,  0x4000 },
5741		{ 0x120000, 0x4000 },
5742		{ 0x1a0000, 0x4000 },
5743		{ 0x160000, 0x4000 },
5744		{ 0xffffffff, 0    },
5745	},
5746	mem_tbl_5709[] = {
5747		{ 0x60000,  0x4000 },
5748		{ 0xa0000,  0x3000 },
5749		{ 0xe0000,  0x4000 },
5750		{ 0x120000, 0x4000 },
5751		{ 0x1a0000, 0x4000 },
5752		{ 0xffffffff, 0    },
5753	};
5754	struct mem_entry *mem_tbl;
5755
5756	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5757		mem_tbl = mem_tbl_5709;
5758	else
5759		mem_tbl = mem_tbl_5706;
5760
5761	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5762		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5763			mem_tbl[i].len)) != 0) {
5764			return ret;
5765		}
5766	}
5767
5768	return ret;
5769}
5770
5771#define BNX2_MAC_LOOPBACK	0
5772#define BNX2_PHY_LOOPBACK	1
5773
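/* Exercise one loopback path as a self-test.  A frame addressed to
 * ourselves is built, queued on a single TX descriptor, and flushed
 * out with a forced coalesce; the test passes only if exactly one
 * frame comes back on the RX ring with a clean l2_fhdr status and a
 * byte-for-byte matching payload.
 */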
5774static int
5775bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5776{
5777	unsigned int pkt_size, num_pkts, i;
5778	struct sk_buff *skb;
5779	u8 *data;
5780	unsigned char *packet;
5781	u16 rx_start_idx, rx_idx;
5782	dma_addr_t map;
5783	struct bnx2_tx_bd *txbd;
5784	struct bnx2_sw_bd *rx_buf;
5785	struct l2_fhdr *rx_hdr;
5786	int ret = -ENODEV;
5787	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5788	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5789	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5790
5791	tx_napi = bnapi;
5792
5793	txr = &tx_napi->tx_ring;
5794	rxr = &bnapi->rx_ring;
5795	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5796		bp->loopback = MAC_LOOPBACK;
5797		bnx2_set_mac_loopback(bp);
	} else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	} else {
		return -EINVAL;
	}
5808
5809	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5810	skb = netdev_alloc_skb(bp->dev, pkt_size);
5811	if (!skb)
5812		return -ENOMEM;
5813	packet = skb_put(skb, pkt_size);
5814	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5815	memset(packet + ETH_ALEN, 0x0, 8);
5816	for (i = 14; i < pkt_size; i++)
5817		packet[i] = (unsigned char) (i & 0xff);
5818
5819	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5820			     PCI_DMA_TODEVICE);
5821	if (dma_mapping_error(&bp->pdev->dev, map)) {
5822		dev_kfree_skb(skb);
5823		return -EIO;
5824	}
5825
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}

#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

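/* Run both loopback modes and report failures as a bitmask so ethtool
 * can tell which of the MAC and PHY tests failed.  The NIC is reset and
 * the PHY re-initialized first so the tests start from a known state.
 */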
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}

#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

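/* Each 0x100-byte NVRAM block carries its own CRC32.  Running
 * ether_crc_le() across a valid block, stored CRC included, yields the
 * well-known CRC-32 residual 0xdebb20e3, so the blocks can be checked
 * without locating the CRC field itself.
 */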
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
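	/* The link-status bit is latched; read the register twice so the
	 * second read reflects the current link state.
	 */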
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

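/* Check that the chip can raise an interrupt at all: force a host
 * coalescing event and poll the status-block index in
 * BNX2_PCICFG_INT_ACK_CMD for up to ~100 ms to see it change.
 */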
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	BNX2_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

/* Determine whether a link is present, for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}

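/* 5706 SerDes timer: while autoneg has not completed but parallel
 * detection sees a link partner, fall back to forced 1000/full; once
 * the partner is seen autonegotiating again, re-enable autoneg.  Also
 * force the link down if sync is lost while the link is nominally up.
 */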
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* work around occasionally corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}

static void
__bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
}

static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}

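/* Map the MSI-X vector table and PBA through GRC windows 2 and 3 and
 * size the table before requesting vectors from the PCI core.  When
 * BCM_CNIC is configured, one extra vector is reserved for the cnic
 * driver on top of those used for the rx/tx rings.
 */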
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}

static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}

/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.  If the MSI test
		 * fails, go back to INTx mode.
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}

static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case the PCI block has been reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}

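/* Debug helper for the TX timeout path: dump the flow-through queue
 * (FTQ) control registers of each on-chip processor, the processor
 * states, and the TX buffer descriptor cache (TBDC), to show where a
 * stuck packet has stalled.
 */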
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}

static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which TX ring this skb will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

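		/* For LSO the chip needs to know where the TCP header
		 * starts: for IPv6, encode the offset of any extension
		 * headers (in 8-byte units) into the BD flag and mss
		 * fields; for IPv4, pass the combined IP and TCP option
		 * lengths in 32-bit words.
		 */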
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}

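/* The chip clears its statistics block on every reset, so accumulate
 * the current hardware counters into temp_stats_blk before a reset;
 * the GET_*BIT_NET_STATS() helpers below fold the saved totals back in.
 */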
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The first 10 counters are 64-bit, stored as hi/lo word pairs;
	 * propagate the carry from the low into the high word.
	 */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}

#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)

static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}

static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
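	/* {start, end} pairs bounding the readable register ranges; the
	 * gaps between them are left zeroed so every register keeps its
	 * natural offset within the 32 KB dump.
	 */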
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link-down event that is visible to the link partner */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

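/* Clamp each coalescing parameter to what the host coalescing block
 * supports: tick values are 10-bit, frame counts 8-bit.  Chips with the
 * broken-stats erratum only allow a statistics interval of 0 or 1 s.
 */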
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

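/* Resizing the rings needs a full reset: quiesce the NIC, free the old
 * rings (and the IRQ vectors too when reset_irq is set), then
 * reallocate and restart.  Statistics are saved first because the chip
 * reset clears them.
 */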
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
	return rc;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

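/* Parallel table to bnx2_stats_str_arr: index i gives the counter's
 * word offset within the statistics block.  The per-chip length arrays
 * further below give each counter's width (8, 4, or 0 = unsupported).
 */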
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

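/* ethtool self-test entry point.  The offline tests (registers, memory,
 * loopback) need the NIC quiesced and reset into diagnostic mode; the
 * online tests run against the live configuration.  Indices into buf[]
 * match bnx2_tests_str_arr above.
 */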
7561static void
7562bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7563{
7564	struct bnx2 *bp = netdev_priv(dev);
7565
7566	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7567	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7568		int i;
7569
7570		bnx2_netif_stop(bp, true);
7571		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7572		bnx2_free_skbs(bp);
7573
7574		if (bnx2_test_registers(bp) != 0) {
7575			buf[0] = 1;
7576			etest->flags |= ETH_TEST_FL_FAILED;
7577		}
7578		if (bnx2_test_memory(bp) != 0) {
7579			buf[1] = 1;
7580			etest->flags |= ETH_TEST_FL_FAILED;
7581		}
7582		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7583			etest->flags |= ETH_TEST_FL_FAILED;
7584
7585		if (!netif_running(bp->dev))
7586			bnx2_shutdown_chip(bp);
7587		else {
7588			bnx2_init_nic(bp, 1);
7589			bnx2_netif_start(bp, true);
7590		}
7591
7592		/* wait for link up */
7593		for (i = 0; i < 7; i++) {
7594			if (bp->link_up)
7595				break;
7596			msleep_interruptible(1000);
7597		}
7598	}
7599
7600	if (bnx2_test_nvram(bp) != 0) {
7601		buf[3] = 1;
7602		etest->flags |= ETH_TEST_FL_FAILED;
7603	}
7604	if (bnx2_test_intr(bp) != 0) {
7605		buf[4] = 1;
7606		etest->flags |= ETH_TEST_FL_FAILED;
7607	}
7608
7609	if (bnx2_test_link(bp) != 0) {
7610		buf[5] = 1;
7611		etest->flags |= ETH_TEST_FL_FAILED;
7612
7613	}
7614}
7615
7616static void
7617bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7618{
7619	switch (stringset) {
7620	case ETH_SS_STATS:
7621		memcpy(buf, bnx2_stats_str_arr,
7622			sizeof(bnx2_stats_str_arr));
7623		break;
7624	case ETH_SS_TEST:
7625		memcpy(buf, bnx2_tests_str_arr,
7626			sizeof(bnx2_tests_str_arr));
7627		break;
7628	}
7629}
7630
7631static void
7632bnx2_get_ethtool_stats(struct net_device *dev,
7633		struct ethtool_stats *stats, u64 *buf)
7634{
7635	struct bnx2 *bp = netdev_priv(dev);
7636	int i;
7637	u32 *hw_stats = (u32 *) bp->stats_blk;
7638	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7639	u8 *stats_len_arr = NULL;
7640
7641	if (hw_stats == NULL) {
7642		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7643		return;
7644	}
7645
7646	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7647	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7648	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7649	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7650		stats_len_arr = bnx2_5706_stats_len_arr;
7651	else
7652		stats_len_arr = bnx2_5708_stats_len_arr;
7653
7654	for (i = 0; i < BNX2_NUM_STATS; i++) {
7655		unsigned long offset;
7656
7657		if (stats_len_arr[i] == 0) {
7658			/* skip this counter */
7659			buf[i] = 0;
7660			continue;
7661		}
7662
7663		offset = bnx2_stats_offset_arr[i];
7664		if (stats_len_arr[i] == 4) {
7665			/* 4-byte counter */
7666			buf[i] = (u64) *(hw_stats + offset) +
7667				 *(temp_stats + offset);
7668			continue;
7669		}
7670		/* 8-byte counter */
7671		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7672			 *(hw_stats + offset + 1) +
7673			 (((u64) *(temp_stats + offset)) << 32) +
7674			 *(temp_stats + offset + 1);
7675	}
7676}
7677
7678static int
7679bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7680{
7681	struct bnx2 *bp = netdev_priv(dev);
7682
7683	switch (state) {
7684	case ETHTOOL_ID_ACTIVE:
7685		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7686		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7687		return 1;	/* cycle on/off once per second */
7688
7689	case ETHTOOL_ID_ON:
7690		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7691			BNX2_EMAC_LED_1000MB_OVERRIDE |
7692			BNX2_EMAC_LED_100MB_OVERRIDE |
7693			BNX2_EMAC_LED_10MB_OVERRIDE |
7694			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7695			BNX2_EMAC_LED_TRAFFIC);
7696		break;
7697
7698	case ETHTOOL_ID_OFF:
7699		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7700		break;
7701
7702	case ETHTOOL_ID_INACTIVE:
7703		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7704		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7705		break;
7706	}
7707
7708	return 0;
7709}
7710
7711static int
7712bnx2_set_features(struct net_device *dev, netdev_features_t features)
7713{
7714	struct bnx2 *bp = netdev_priv(dev);
7715
7716	/* TSO with VLAN tag won't work with current firmware */
7717	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7718		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7719	else
7720		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7721
7722	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7723	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7724	    netif_running(dev)) {
7725		bnx2_netif_stop(bp, false);
7726		dev->features = features;
7727		bnx2_set_rx_mode(dev);
7728		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7729		bnx2_netif_start(bp, false);
7730		return 1;
7731	}
7732
7733	return 0;
7734}
7735
7736static void bnx2_get_channels(struct net_device *dev,
7737			      struct ethtool_channels *channels)
7738{
7739	struct bnx2 *bp = netdev_priv(dev);
7740	u32 max_rx_rings = 1;
7741	u32 max_tx_rings = 1;
7742
7743	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7744		max_rx_rings = RX_MAX_RINGS;
7745		max_tx_rings = TX_MAX_RINGS;
7746	}
7747
7748	channels->max_rx = max_rx_rings;
7749	channels->max_tx = max_tx_rings;
7750	channels->max_other = 0;
7751	channels->max_combined = 0;
7752	channels->rx_count = bp->num_rx_rings;
7753	channels->tx_count = bp->num_tx_rings;
7754	channels->other_count = 0;
7755	channels->combined_count = 0;
7756}
7757
7758static int bnx2_set_channels(struct net_device *dev,
7759			      struct ethtool_channels *channels)
7760{
7761	struct bnx2 *bp = netdev_priv(dev);
7762	u32 max_rx_rings = 1;
7763	u32 max_tx_rings = 1;
7764	int rc = 0;
7765
7766	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7767		max_rx_rings = RX_MAX_RINGS;
7768		max_tx_rings = TX_MAX_RINGS;
7769	}
7770	if (channels->rx_count > max_rx_rings ||
7771	    channels->tx_count > max_tx_rings)
7772		return -EINVAL;
7773
7774	bp->num_req_rx_rings = channels->rx_count;
7775	bp->num_req_tx_rings = channels->tx_count;
7776
7777	if (netif_running(dev))
7778		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7779					   bp->tx_ring_size, true);
7780
7781	return rc;
7782}
7783
7784static const struct ethtool_ops bnx2_ethtool_ops = {
7785	.get_settings		= bnx2_get_settings,
7786	.set_settings		= bnx2_set_settings,
7787	.get_drvinfo		= bnx2_get_drvinfo,
7788	.get_regs_len		= bnx2_get_regs_len,
7789	.get_regs		= bnx2_get_regs,
7790	.get_wol		= bnx2_get_wol,
7791	.set_wol		= bnx2_set_wol,
7792	.nway_reset		= bnx2_nway_reset,
7793	.get_link		= bnx2_get_link,
7794	.get_eeprom_len		= bnx2_get_eeprom_len,
7795	.get_eeprom		= bnx2_get_eeprom,
7796	.set_eeprom		= bnx2_set_eeprom,
7797	.get_coalesce		= bnx2_get_coalesce,
7798	.set_coalesce		= bnx2_set_coalesce,
7799	.get_ringparam		= bnx2_get_ringparam,
7800	.set_ringparam		= bnx2_set_ringparam,
7801	.get_pauseparam		= bnx2_get_pauseparam,
7802	.set_pauseparam		= bnx2_set_pauseparam,
7803	.self_test		= bnx2_self_test,
7804	.get_strings		= bnx2_get_strings,
7805	.set_phys_id		= bnx2_set_phys_id,
7806	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7807	.get_sset_count		= bnx2_get_sset_count,
7808	.get_channels		= bnx2_get_channels,
7809	.set_channels		= bnx2_set_channels,
7810};
7811
7812/* Called with rtnl_lock */
7813static int
7814bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7815{
7816	struct mii_ioctl_data *data = if_mii(ifr);
7817	struct bnx2 *bp = netdev_priv(dev);
7818	int err;
7819
7820	switch(cmd) {
7821	case SIOCGMIIPHY:
7822		data->phy_id = bp->phy_addr;
7823
7824		/* fallthru */
7825	case SIOCGMIIREG: {
7826		u32 mii_regval;
7827
7828		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7829			return -EOPNOTSUPP;
7830
7831		if (!netif_running(dev))
7832			return -EAGAIN;
7833
7834		spin_lock_bh(&bp->phy_lock);
7835		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7836		spin_unlock_bh(&bp->phy_lock);
7837
7838		data->val_out = mii_regval;
7839
7840		return err;
7841	}
7842
7843	case SIOCSMIIREG:
7844		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7845			return -EOPNOTSUPP;
7846
7847		if (!netif_running(dev))
7848			return -EAGAIN;
7849
7850		spin_lock_bh(&bp->phy_lock);
7851		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7852		spin_unlock_bh(&bp->phy_lock);
7853
7854		return err;
7855
7856	default:
7857		/* do nothing */
7858		break;
7859	}
7860	return -EOPNOTSUPP;
7861}
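
/* A hedged userspace sketch of the MII ioctls handled above; the
 * interface name and register choice are illustrative assumptions:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	(fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	(PHY status in mii->val_out)
 */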
7862
7863/* Called with rtnl_lock */
7864static int
7865bnx2_change_mac_addr(struct net_device *dev, void *p)
7866{
7867	struct sockaddr *addr = p;
7868	struct bnx2 *bp = netdev_priv(dev);
7869
7870	if (!is_valid_ether_addr(addr->sa_data))
7871		return -EADDRNOTAVAIL;
7872
7873	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7874	if (netif_running(dev))
7875		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7876
7877	return 0;
7878}
7879
7880/* Called with rtnl_lock */
7881static int
7882bnx2_change_mtu(struct net_device *dev, int new_mtu)
7883{
7884	struct bnx2 *bp = netdev_priv(dev);
7885
7886	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7887		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7888		return -EINVAL;
7889
7890	dev->mtu = new_mtu;
7891	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7892				     false);
7893}
7894
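/* With CONFIG_NET_POLL_CONTROLLER, netpoll clients such as netconsole
 * may need to drive the NIC while normal interrupt delivery cannot be
 * relied on; poll_bnx2() below services every vector by calling its
 * registered handler synchronously with that IRQ disabled.
 */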
7895#ifdef CONFIG_NET_POLL_CONTROLLER
7896static void
7897poll_bnx2(struct net_device *dev)
7898{
7899	struct bnx2 *bp = netdev_priv(dev);
7900	int i;
7901
7902	for (i = 0; i < bp->irq_nvecs; i++) {
7903		struct bnx2_irq *irq = &bp->irq_tbl[i];
7904
7905		disable_irq(irq->vector);
7906		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7907		enable_irq(irq->vector);
7908	}
7909}
7910#endif
7911
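/* Determine whether a 5709 port is copper or SerDes.  A fixed bond ID
 * decides directly ('C' bonds are copper, 'S' bonds are SerDes); for
 * dual-media parts the media is decoded from the strap value, read from
 * either the override field or the hardware straps, using a different
 * strap table for each PCI function.
 */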
7912static void
7913bnx2_get_5709_media(struct bnx2 *bp)
7914{
7915	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7916	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7917	u32 strap;
7918
7919	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7920		return;
7921	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7922		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7923		return;
7924	}
7925
7926	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7927		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7928	else
7929		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7930
7931	if (bp->func == 0) {
7932		switch (strap) {
7933		case 0x4:
7934		case 0x5:
7935		case 0x6:
7936			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7937			return;
7938		}
7939	} else {
7940		switch (strap) {
7941		case 0x1:
7942		case 0x2:
7943		case 0x4:
7944			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7945			return;
7946		}
7947	}
7948}
7949
7950static void
7951bnx2_get_pci_speed(struct bnx2 *bp)
7952{
7953	u32 reg;
7954
7955	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7956	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7957		u32 clkreg;
7958
7959		bp->flags |= BNX2_FLAG_PCIX;
7960
7961		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7962
7963		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7964		switch (clkreg) {
7965		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7966			bp->bus_speed_mhz = 133;
7967			break;
7968
7969		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7970			bp->bus_speed_mhz = 100;
7971			break;
7972
7973		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7974		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7975			bp->bus_speed_mhz = 66;
7976			break;
7977
7978		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7979		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7980			bp->bus_speed_mhz = 50;
7981			break;
7982
7983		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7984		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7985		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7986			bp->bus_speed_mhz = 33;
7987			break;
7988		}
	} else {
7991		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7992			bp->bus_speed_mhz = 66;
7993		else
7994			bp->bus_speed_mhz = 33;
7995	}
7996
7997	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7998		bp->flags |= BNX2_FLAG_PCI_32BIT;
7999
8000}
8001
8002static void
8003bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8004{
8005	int rc, i, j;
8006	u8 *data;
8007	unsigned int block_end, rosize, len;
8008
8009#define BNX2_VPD_NVRAM_OFFSET	0x300
8010#define BNX2_VPD_LEN		128
8011#define BNX2_MAX_VER_SLEN	30
8012
	data = kmalloc(BNX2_VPD_LEN * 2, GFP_KERNEL);
8014	if (!data)
8015		return;
8016
8017	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8018			     BNX2_VPD_LEN);
8019	if (rc)
8020		goto vpd_done;
8021
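	/* NVRAM data was read into the upper half of the buffer; rebuild
	 * the image the pci_vpd_* helpers will parse in the lower half by
	 * reversing the bytes within each 32-bit word.
	 */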
8022	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8023		data[i] = data[i + BNX2_VPD_LEN + 3];
8024		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8025		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8026		data[i + 3] = data[i + BNX2_VPD_LEN];
8027	}
8028
8029	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8030	if (i < 0)
8031		goto vpd_done;
8032
8033	rosize = pci_vpd_lrdt_size(&data[i]);
8034	i += PCI_VPD_LRDT_TAG_SIZE;
8035	block_end = i + rosize;
8036
8037	if (block_end > BNX2_VPD_LEN)
8038		goto vpd_done;
8039
8040	j = pci_vpd_find_info_keyword(data, i, rosize,
8041				      PCI_VPD_RO_KEYWORD_MFR_ID);
8042	if (j < 0)
8043		goto vpd_done;
8044
8045	len = pci_vpd_info_field_size(&data[j]);
8046
8047	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8048	if (j + len > block_end || len != 4 ||
8049	    memcmp(&data[j], "1028", 4))
8050		goto vpd_done;
8051
8052	j = pci_vpd_find_info_keyword(data, i, rosize,
8053				      PCI_VPD_RO_KEYWORD_VENDOR0);
8054	if (j < 0)
8055		goto vpd_done;
8056
8057	len = pci_vpd_info_field_size(&data[j]);
8058
8059	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8060	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8061		goto vpd_done;
8062
8063	memcpy(bp->fw_version, &data[j], len);
8064	bp->fw_version[len] = ' ';
8065
8066vpd_done:
8067	kfree(data);
8068}
8069
8070static int
8071bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8072{
8073	struct bnx2 *bp;
8074	int rc, i, j;
8075	u32 reg;
8076	u64 dma_mask, persist_dma_mask;
8077	int err;
8078
8079	SET_NETDEV_DEV(dev, &pdev->dev);
8080	bp = netdev_priv(dev);
8081
8082	bp->flags = 0;
8083	bp->phy_flags = 0;
8084
8085	bp->temp_stats_blk =
8086		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8087
8088	if (bp->temp_stats_blk == NULL) {
8089		rc = -ENOMEM;
8090		goto err_out;
8091	}
8092
8093	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8094	rc = pci_enable_device(pdev);
8095	if (rc) {
8096		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8097		goto err_out;
8098	}
8099
8100	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8101		dev_err(&pdev->dev,
8102			"Cannot find PCI device base address, aborting\n");
8103		rc = -ENODEV;
8104		goto err_out_disable;
8105	}
8106
8107	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8108	if (rc) {
8109		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8110		goto err_out_disable;
8111	}
8112
8113	pci_set_master(pdev);
8114
8115	bp->pm_cap = pdev->pm_cap;
8116	if (bp->pm_cap == 0) {
8117		dev_err(&pdev->dev,
8118			"Cannot find power management capability, aborting\n");
8119		rc = -EIO;
8120		goto err_out_release;
8121	}
8122
8123	bp->dev = dev;
8124	bp->pdev = pdev;
8125
8126	spin_lock_init(&bp->phy_lock);
8127	spin_lock_init(&bp->indirect_lock);
8128#ifdef BCM_CNIC
8129	mutex_init(&bp->cnic_lock);
8130#endif
8131	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8132
8133	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8134							 TX_MAX_TSS_RINGS + 1));
8135	if (!bp->regview) {
8136		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8137		rc = -ENOMEM;
8138		goto err_out_release;
8139	}
8140
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
8145	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8146		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8147		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8148
8149	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8150
8151	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8152		if (!pci_is_pcie(pdev)) {
8153			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8154			rc = -EIO;
8155			goto err_out_unmap;
8156		}
8157		bp->flags |= BNX2_FLAG_PCIE;
8158		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8159			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8160
8161		/* AER (Advanced Error Reporting) hooks */
8162		err = pci_enable_pcie_error_reporting(pdev);
8163		if (!err)
8164			bp->flags |= BNX2_FLAG_AER_ENABLED;
8165
8166	} else {
8167		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8168		if (bp->pcix_cap == 0) {
8169			dev_err(&pdev->dev,
8170				"Cannot find PCIX capability, aborting\n");
8171			rc = -EIO;
8172			goto err_out_unmap;
8173		}
8174		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8175	}
8176
8177	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8178	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8179		if (pdev->msix_cap)
8180			bp->flags |= BNX2_FLAG_MSIX_CAP;
8181	}
8182
8183	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8184	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8185		if (pdev->msi_cap)
8186			bp->flags |= BNX2_FLAG_MSI_CAP;
8187	}
8188
8189	/* 5708 cannot support DMA addresses > 40-bit.  */
8190	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8191		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8192	else
8193		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8194
8195	/* Configure DMA attributes. */
8196	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8197		dev->features |= NETIF_F_HIGHDMA;
8198		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8199		if (rc) {
8200			dev_err(&pdev->dev,
8201				"pci_set_consistent_dma_mask failed, aborting\n");
8202			goto err_out_unmap;
8203		}
8204	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8205		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8206		goto err_out_unmap;
8207	}
8208
8209	if (!(bp->flags & BNX2_FLAG_PCIE))
8210		bnx2_get_pci_speed(bp);
8211
8212	/* 5706A0 may falsely detect SERR and PERR. */
8213	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8214		reg = BNX2_RD(bp, PCI_COMMAND);
8215		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8216		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}
8224
8225	bnx2_init_nvram(bp);
8226
8227	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8228
8229	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8230		bp->func = 1;
8231
8232	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8233	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8234		u32 off = bp->func << 2;
8235
8236		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8237	} else
8238		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8239
8240	/* Get the permanent MAC address.  First we need to make sure the
8241	 * firmware is actually running.
8242	 */
8243	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8244
8245	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8246	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8247		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8248		rc = -ENODEV;
8249		goto err_out_unmap;
8250	}
8251
8252	bnx2_read_vpd_fw_ver(bp);
8253
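	/* BNX2_DEV_INFO_BC_REV packs the bootcode version as three bytes
	 * starting at bit 24.  The loop below renders each byte as decimal
	 * ASCII with leading zeros suppressed; e.g. (illustrative only)
	 * reg = 0x01020300 appends "bc 1.2.3".
	 */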
8254	j = strlen(bp->fw_version);
8255	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8256	for (i = 0; i < 3 && j < 24; i++) {
8257		u8 num, k, skip0;
8258
8259		if (i == 0) {
8260			bp->fw_version[j++] = 'b';
8261			bp->fw_version[j++] = 'c';
8262			bp->fw_version[j++] = ' ';
8263		}
8264		num = (u8) (reg >> (24 - (i * 8)));
8265		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8266			if (num >= k || !skip0 || k == 1) {
8267				bp->fw_version[j++] = (num / k) + '0';
8268				skip0 = 0;
8269			}
8270		}
8271		if (i != 2)
8272			bp->fw_version[j++] = '.';
8273	}
8274	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8275	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8276		bp->wol = 1;
8277
8278	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8279		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8280
8281		for (i = 0; i < 30; i++) {
8282			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8283			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8284				break;
8285			msleep(10);
8286		}
8287	}
8288	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8289	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8290	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8291	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8292		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8293
8294		if (j < 32)
8295			bp->fw_version[j++] = ' ';
8296		for (i = 0; i < 3 && j < 28; i++) {
8297			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8298			reg = be32_to_cpu(reg);
8299			memcpy(&bp->fw_version[j], &reg, 4);
8300			j += 4;
8301		}
8302	}
8303
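	/* The permanent MAC address spans two shared-memory words: the two
	 * high bytes in MAC_UPPER and the remaining four in MAC_LOWER,
	 * most significant byte first.
	 */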
8304	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8305	bp->mac_addr[0] = (u8) (reg >> 8);
8306	bp->mac_addr[1] = (u8) reg;
8307
8308	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8309	bp->mac_addr[2] = (u8) (reg >> 24);
8310	bp->mac_addr[3] = (u8) (reg >> 16);
8311	bp->mac_addr[4] = (u8) (reg >> 8);
8312	bp->mac_addr[5] = (u8) reg;
8313
8314	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8315	bnx2_set_rx_ring_size(bp, 255);
8316
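	/* Default interrupt coalescing parameters; all of them can be
	 * retuned at runtime with "ethtool -C" via bnx2_set_coalesce().
	 */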
8317	bp->tx_quick_cons_trip_int = 2;
8318	bp->tx_quick_cons_trip = 20;
8319	bp->tx_ticks_int = 18;
8320	bp->tx_ticks = 80;
8321
8322	bp->rx_quick_cons_trip_int = 2;
8323	bp->rx_quick_cons_trip = 12;
8324	bp->rx_ticks_int = 18;
8325	bp->rx_ticks = 18;
8326
8327	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8328
8329	bp->current_interval = BNX2_TIMER_INTERVAL;
8330
8331	bp->phy_addr = 1;
8332
	/* Determine the media type; WOL support is disabled below on
	 * SERDES chips that cannot link on VAUX power.
	 */
8334	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8335		bnx2_get_5709_media(bp);
8336	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8337		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8338
8339	bp->phy_port = PORT_TP;
8340	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8341		bp->phy_port = PORT_FIBRE;
8342		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8343		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8344			bp->flags |= BNX2_FLAG_NO_WOL;
8345			bp->wol = 0;
8346		}
8347		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8348			/* Don't do parallel detect on this board because of
8349			 * some board problems.  The link will not go down
8350			 * if we do parallel detect.
8351			 */
8352			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8353			    pdev->subsystem_device == 0x310c)
8354				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8355		} else {
8356			bp->phy_addr = 2;
8357			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8358				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8359		}
8360	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8361		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8362		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8363	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8364		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8365		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8366		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8367
8368	bnx2_init_fw_cap(bp);
8369
8370	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8371	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8372	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8373	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8374		bp->flags |= BNX2_FLAG_NO_WOL;
8375		bp->wol = 0;
8376	}
8377
8378	if (bp->flags & BNX2_FLAG_NO_WOL)
8379		device_set_wakeup_capable(&bp->pdev->dev, false);
8380	else
8381		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8382
8383	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8384		bp->tx_quick_cons_trip_int =
8385			bp->tx_quick_cons_trip;
8386		bp->tx_ticks_int = bp->tx_ticks;
8387		bp->rx_quick_cons_trip_int =
8388			bp->rx_quick_cons_trip;
8389		bp->rx_ticks_int = bp->rx_ticks;
8390		bp->comp_prod_trip_int = bp->comp_prod_trip;
8391		bp->com_ticks_int = bp->com_ticks;
8392		bp->cmd_ticks_int = bp->cmd_ticks;
8393	}
8394
8395	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8396	 *
8397	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8398	 * with byte enables disabled on the unused 32-bit word.  This is legal
8399	 * but causes problems on the AMD 8132 which will eventually stop
8400	 * responding after a while.
8401	 *
8402	 * AMD believes this incompatibility is unique to the 5706, and
8403	 * prefers to locally disable MSI rather than globally disabling it.
8404	 */
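	/* Note that pci_get_device() iterates with reference counting --
	 * each call puts the previous device and gets the next -- so only
	 * the device held when breaking out of the loop needs an explicit
	 * pci_dev_put().
	 */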
8405	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8406		struct pci_dev *amd_8132 = NULL;
8407
8408		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8409						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8410						  amd_8132))) {
8411
8412			if (amd_8132->revision >= 0x10 &&
8413			    amd_8132->revision <= 0x13) {
8414				disable_msi = 1;
8415				pci_dev_put(amd_8132);
8416				break;
8417			}
8418		}
8419	}
8420
8421	bnx2_set_default_link(bp);
8422	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8423
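	/* Set up, but do not yet arm, the periodic maintenance timer; it
	 * is started later, when the interface is brought up.
	 */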
8424	init_timer(&bp->timer);
8425	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8426	bp->timer.data = (unsigned long) bp;
8427	bp->timer.function = bnx2_timer;
8428
8429#ifdef BCM_CNIC
8430	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8431		bp->cnic_eth_dev.max_iscsi_conn =
8432			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8433			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8434	bp->cnic_probe = bnx2_cnic_probe;
8435#endif
8436	pci_save_state(pdev);
8437
8438	return 0;
8439
8440err_out_unmap:
8441	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8442		pci_disable_pcie_error_reporting(pdev);
8443		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8444	}
8445
8446	pci_iounmap(pdev, bp->regview);
8447	bp->regview = NULL;
8448
8449err_out_release:
8450	pci_release_regions(pdev);
8451
8452err_out_disable:
8453	pci_disable_device(pdev);
8454
8455err_out:
8456	return rc;
8457}
8458
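/* Format a human-readable bus description such as "PCI Express" or
 * "PCI-X 64-bit 133MHz" (illustrative outputs).  The caller supplies
 * the buffer; bnx2_init_one() passes a 40-byte scratch string.
 */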
8459static char *
8460bnx2_bus_string(struct bnx2 *bp, char *str)
8461{
8462	char *s = str;
8463
8464	if (bp->flags & BNX2_FLAG_PCIE) {
8465		s += sprintf(s, "PCI Express");
8466	} else {
8467		s += sprintf(s, "PCI");
8468		if (bp->flags & BNX2_FLAG_PCIX)
8469			s += sprintf(s, "-X");
8470		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8471			s += sprintf(s, " 32-bit");
8472		else
8473			s += sprintf(s, " 64-bit");
8474		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8475	}
8476	return str;
8477}
8478
8479static void
8480bnx2_del_napi(struct bnx2 *bp)
8481{
8482	int i;
8483
8484	for (i = 0; i < bp->irq_nvecs; i++)
8485		netif_napi_del(&bp->bnx2_napi[i].napi);
8486}
8487
8488static void
8489bnx2_init_napi(struct bnx2 *bp)
8490{
8491	int i;
8492
8493	for (i = 0; i < bp->irq_nvecs; i++) {
8494		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8495		int (*poll)(struct napi_struct *, int);
8496
8497		if (i == 0)
8498			poll = bnx2_poll;
8499		else
8500			poll = bnx2_poll_msix;
8501
8502		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8503		bnapi->bp = bp;
8504	}
8505}
8506
8507static const struct net_device_ops bnx2_netdev_ops = {
8508	.ndo_open		= bnx2_open,
8509	.ndo_start_xmit		= bnx2_start_xmit,
8510	.ndo_stop		= bnx2_close,
8511	.ndo_get_stats64	= bnx2_get_stats64,
8512	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8513	.ndo_do_ioctl		= bnx2_ioctl,
8514	.ndo_validate_addr	= eth_validate_addr,
8515	.ndo_set_mac_address	= bnx2_change_mac_addr,
8516	.ndo_change_mtu		= bnx2_change_mtu,
8517	.ndo_set_features	= bnx2_set_features,
8518	.ndo_tx_timeout		= bnx2_tx_timeout,
8519#ifdef CONFIG_NET_POLL_CONTROLLER
8520	.ndo_poll_controller	= poll_bnx2,
8521#endif
8522};
8523
8524static int
8525bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8526{
	static int version_printed;
8528	struct net_device *dev;
8529	struct bnx2 *bp;
8530	int rc;
8531	char str[40];
8532
8533	if (version_printed++ == 0)
8534		pr_info("%s", version);
8535
	/* dev zeroed in alloc_etherdev_mq() */
8537	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8538	if (!dev)
8539		return -ENOMEM;
8540
8541	rc = bnx2_init_board(pdev, dev);
8542	if (rc < 0)
8543		goto err_free;
8544
8545	dev->netdev_ops = &bnx2_netdev_ops;
8546	dev->watchdog_timeo = TX_TIMEOUT;
8547	dev->ethtool_ops = &bnx2_ethtool_ops;
8548
8549	bp = netdev_priv(dev);
8550
8551	pci_set_drvdata(pdev, dev);
8552
8553	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8554
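	/* hw_features lists what "ethtool -K" may toggle; the copy into
	 * dev->features below selects which of them start out enabled.
	 */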
8555	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8556		NETIF_F_TSO | NETIF_F_TSO_ECN |
8557		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8558
8559	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8560		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8561
8562	dev->vlan_features = dev->hw_features;
8563	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8564	dev->features |= dev->hw_features;
8565	dev->priv_flags |= IFF_UNICAST_FLT;
8566
8567	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8568		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8569
	rc = register_netdev(dev);
	if (rc) {
8571		dev_err(&pdev->dev, "Cannot register net device\n");
8572		goto error;
8573	}
8574
8575	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8576		    "node addr %pM\n", board_info[ent->driver_data].name,
8577		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8578		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8579		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8580		    pdev->irq, dev->dev_addr);
8581
8582	return 0;
8583
8584error:
8585	pci_iounmap(pdev, bp->regview);
8586	pci_release_regions(pdev);
8587	pci_disable_device(pdev);
8588err_free:
8589	free_netdev(dev);
8590	return rc;
8591}
8592
8593static void
8594bnx2_remove_one(struct pci_dev *pdev)
8595{
8596	struct net_device *dev = pci_get_drvdata(pdev);
8597	struct bnx2 *bp = netdev_priv(dev);
8598
8599	unregister_netdev(dev);
8600
8601	del_timer_sync(&bp->timer);
8602	cancel_work_sync(&bp->reset_task);
8603
8604	pci_iounmap(bp->pdev, bp->regview);
8605
8606	kfree(bp->temp_stats_blk);
8607
8608	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8609		pci_disable_pcie_error_reporting(pdev);
8610		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8611	}
8612
8613	bnx2_release_firmware(bp);
8614
8615	free_netdev(dev);
8616
8617	pci_release_regions(pdev);
8618	pci_disable_device(pdev);
8619}
8620
8621#ifdef CONFIG_PM_SLEEP
8622static int
8623bnx2_suspend(struct device *device)
8624{
8625	struct pci_dev *pdev = to_pci_dev(device);
8626	struct net_device *dev = pci_get_drvdata(pdev);
8627	struct bnx2 *bp = netdev_priv(dev);
8628
8629	if (netif_running(dev)) {
8630		cancel_work_sync(&bp->reset_task);
8631		bnx2_netif_stop(bp, true);
8632		netif_device_detach(dev);
8633		del_timer_sync(&bp->timer);
8634		bnx2_shutdown_chip(bp);
8635		__bnx2_free_irq(bp);
8636		bnx2_free_skbs(bp);
8637	}
8638	bnx2_setup_wol(bp);
8639	return 0;
8640}
8641
8642static int
8643bnx2_resume(struct device *device)
8644{
8645	struct pci_dev *pdev = to_pci_dev(device);
8646	struct net_device *dev = pci_get_drvdata(pdev);
8647	struct bnx2 *bp = netdev_priv(dev);
8648
8649	if (!netif_running(dev))
8650		return 0;
8651
8652	bnx2_set_power_state(bp, PCI_D0);
8653	netif_device_attach(dev);
8654	bnx2_request_irq(bp);
8655	bnx2_init_nic(bp, 1);
8656	bnx2_netif_start(bp, true);
8657	return 0;
8658}
8659
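/* SIMPLE_DEV_PM_OPS() binds one suspend/resume pair to every system
 * sleep transition; a rough, non-authoritative expansion:
 *
 *	static const struct dev_pm_ops bnx2_pm_ops = {
 *		.suspend  = bnx2_suspend,	.resume  = bnx2_resume,
 *		.freeze   = bnx2_suspend,	.thaw    = bnx2_resume,
 *		.poweroff = bnx2_suspend,	.restore = bnx2_resume,
 *	};
 */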
8660static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8661#define BNX2_PM_OPS (&bnx2_pm_ops)
8662
8663#else
8664
8665#define BNX2_PM_OPS NULL
8666
#endif /* CONFIG_PM_SLEEP */

8668/**
8669 * bnx2_io_error_detected - called when PCI error is detected
8670 * @pdev: Pointer to PCI device
8671 * @state: The current pci connection state
8672 *
8673 * This function is called after a PCI bus error affecting
8674 * this device has been detected.
8675 */
8676static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8677					       pci_channel_state_t state)
8678{
8679	struct net_device *dev = pci_get_drvdata(pdev);
8680	struct bnx2 *bp = netdev_priv(dev);
8681
8682	rtnl_lock();
8683	netif_device_detach(dev);
8684
8685	if (state == pci_channel_io_perm_failure) {
8686		rtnl_unlock();
8687		return PCI_ERS_RESULT_DISCONNECT;
8688	}
8689
8690	if (netif_running(dev)) {
8691		bnx2_netif_stop(bp, true);
8692		del_timer_sync(&bp->timer);
8693		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8694	}
8695
8696	pci_disable_device(pdev);
8697	rtnl_unlock();
8698
	/* Request a slot reset. */
8700	return PCI_ERS_RESULT_NEED_RESET;
8701}
8702
8703/**
8704 * bnx2_io_slot_reset - called after the pci bus has been reset.
8705 * @pdev: Pointer to PCI device
8706 *
8707 * Restart the card from scratch, as if from a cold-boot.
8708 */
8709static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8710{
8711	struct net_device *dev = pci_get_drvdata(pdev);
8712	struct bnx2 *bp = netdev_priv(dev);
8713	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8714	int err = 0;
8715
8716	rtnl_lock();
8717	if (pci_enable_device(pdev)) {
8718		dev_err(&pdev->dev,
8719			"Cannot re-enable PCI device after reset\n");
8720	} else {
8721		pci_set_master(pdev);
8722		pci_restore_state(pdev);
8723		pci_save_state(pdev);
8724
8725		if (netif_running(dev))
8726			err = bnx2_init_nic(bp, 1);
8727
8728		if (!err)
8729			result = PCI_ERS_RESULT_RECOVERED;
8730	}
8731
8732	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8733		bnx2_napi_enable(bp);
8734		dev_close(dev);
8735	}
8736	rtnl_unlock();
8737
8738	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8739		return result;
8740
8741	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8742	if (err) {
8743		dev_err(&pdev->dev,
8744			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8745			 err); /* non-fatal, continue */
8746	}
8747
8748	return result;
8749}
8750
8751/**
8752 * bnx2_io_resume - called when traffic can start flowing again.
8753 * @pdev: Pointer to PCI device
8754 *
8755 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8757 */
8758static void bnx2_io_resume(struct pci_dev *pdev)
8759{
8760	struct net_device *dev = pci_get_drvdata(pdev);
8761	struct bnx2 *bp = netdev_priv(dev);
8762
8763	rtnl_lock();
8764	if (netif_running(dev))
8765		bnx2_netif_start(bp, true);
8766
8767	netif_device_attach(dev);
8768	rtnl_unlock();
8769}
8770
8771static void bnx2_shutdown(struct pci_dev *pdev)
8772{
8773	struct net_device *dev = pci_get_drvdata(pdev);
8774	struct bnx2 *bp;
8775
8776	if (!dev)
8777		return;
8778
8779	bp = netdev_priv(dev);
8780	if (!bp)
8781		return;
8782
8783	rtnl_lock();
8784	if (netif_running(dev))
8785		dev_close(bp->dev);
8786
8787	if (system_state == SYSTEM_POWER_OFF)
8788		bnx2_set_power_state(bp, PCI_D3hot);
8789
8790	rtnl_unlock();
8791}
8792
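/* PCI error recovery walks these hooks in order: .error_detected
 * reports the fault (we request a reset), .slot_reset reinitializes the
 * device after the bus reset, and .resume runs once traffic may flow
 * again.
 */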
8793static const struct pci_error_handlers bnx2_err_handler = {
8794	.error_detected	= bnx2_io_error_detected,
8795	.slot_reset	= bnx2_io_slot_reset,
8796	.resume		= bnx2_io_resume,
8797};
8798
8799static struct pci_driver bnx2_pci_driver = {
8800	.name		= DRV_MODULE_NAME,
8801	.id_table	= bnx2_pci_tbl,
8802	.probe		= bnx2_init_one,
8803	.remove		= bnx2_remove_one,
8804	.driver.pm	= BNX2_PM_OPS,
8805	.err_handler	= &bnx2_err_handler,
8806	.shutdown	= bnx2_shutdown,
8807};
8808
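/* module_pci_driver() generates the module_init()/module_exit()
 * boilerplate that registers and unregisters bnx2_pci_driver with the
 * PCI core.
 */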
8809module_pci_driver(bnx2_pci_driver);
8810