1/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
2/*
3	Written 1998-2000 by Donald Becker.
4	Updates 2000 by Keith Underwood.
5
6	This software may be used and distributed according to the terms of
7	the GNU General Public License (GPL), incorporated herein by reference.
8	Drivers based on or derived from this code fall under the GPL and must
9	retain the authorship, copyright and license notice.  This file is not
10	a complete program and may only be used when the entire operating
11	system is licensed under the GPL.
12
13	The author may be reached as becker@scyld.com, or C/O
14	Scyld Computing Corporation
15	410 Severn Ave., Suite 210
16	Annapolis MD 21403
17
18	This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
19	adapter.
20
21	Support and updates available at
22	http://www.scyld.com/network/hamachi.html
23	[link no longer provides useful info -jgarzik]
24	or
25	http://www.parl.clemson.edu/~keithu/hamachi.html
26
27*/
28
29#define DRV_NAME	"hamachi"
30#define DRV_VERSION	"2.1"
31#define DRV_RELDATE	"Sept 11, 2006"
32
33
34/* A few user-configurable values. */
35
36static int debug = 1;		/* 1 normal messages, 0 quiet .. 7 verbose.  */
37#define final_version
38#define hamachi_debug debug
39/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
40static int max_interrupt_work = 40;
41static int mtu;
42/* Default values selected by testing on a dual processor PIII-450 */
43/* These six interrupt control parameters may be set directly when loading the
44 * module, or through the rx_params and tx_params variables
45 */
46static int max_rx_latency = 0x11;
47static int max_rx_gap = 0x05;
48static int min_rx_pkt = 0x18;
49static int max_tx_latency = 0x00;
50static int max_tx_gap = 0x00;
51static int min_tx_pkt = 0x30;
52
53/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
54   -Setting to > 1518 causes all frames to be copied
55	-Setting to 0 disables copies
56*/
57static int rx_copybreak;
58
59/* An override for the hardware detection of bus width.
60	Set to 1 to force 32 bit PCI bus detection.  Set to 4 to force 64 bit.
61	Add 2 to disable parity detection.
62*/
63static int force32;
64
65
66/* Used to pass the media type, etc.
67   These exist for driver interoperability.
68   No media types are currently defined.
69		- The lower 4 bits are reserved for the media type.
70		- The next three bits may be set to one of the following:
71			0x00000000 : Autodetect PCI bus
72			0x00000010 : Force 32 bit PCI bus
73			0x00000020 : Disable parity detection
74			0x00000040 : Force 64 bit PCI bus
75			Default is autodetect
76		- The next bit can be used to force half-duplex.  This is a bad
		  idea since no known implementation supports half-duplex, and,
78		  in general, half-duplex for gigabit ethernet is a bad idea.
79			0x00000080 : Force half-duplex
80			Default is full-duplex.
81		- In the original driver, the ninth bit could be used to force
82		  full-duplex.  Maintain that for compatibility
83		   0x00000200 : Force full-duplex
84*/
85#define MAX_UNITS 8				/* More are supported, limit only on options */
86static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
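/* Sketch (exposition only, not part of the driver): how hamachi_init_one()
 * below interprets an option word, matching the bit layout described above:
 *
 *	default_port = option & 15;
 *	force32      = (option & 0x00000070) >> 4;
 *	half_duplex  = (option & 0x00000080) != 0;
 *	full_duplex  = (option & 0x00000200) != 0;
 */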
88/* The Hamachi chipset supports 3 parameters each for Rx and Tx
 * interrupt management.  Parameters will be loaded as specified into
90 * the TxIntControl and RxIntControl registers.
91 *
92 * The registers are arranged as follows:
93 *     23 - 16   15 -  8   7    -    0
94 *    _________________________________
95 *   | min_pkt | max_gap | max_latency |
96 *    ---------------------------------
97 *   min_pkt      : The minimum number of packets processed between
98 *                  interrupts.
99 *   max_gap      : The maximum inter-packet gap in units of 8.192 us
100 *   max_latency  : The absolute time between interrupts in units of 8.192 us
101 *
102 */
103static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
104static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
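/* Sketch (exposition only): the probe code below packs the three fields into
 * a single register value exactly as laid out above, e.g. for Rx:
 *
 *	rx_int_var = (min_rx_pkt << 16) | (max_rx_gap << 8) | max_rx_latency;
 *
 * so the defaults (0x18, 0x05, 0x11) give 0x00180511.
 */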
105
106/* Operational parameters that are set at compile time. */
107
108/* Keep the ring sizes a power of two for compile efficiency.
109	The compiler will convert <unsigned>'%'<2^N> into a bit mask.
110   Making the Tx ring too large decreases the effectiveness of channel
111   bonding and packet priority.
112   There are no ill effects from too-large receive rings, except for
113	excessive memory usage */
114/* Empirically it appears that the Tx ring needs to be a little bigger
115   for these Gbit adapters or you get into an overrun condition really
116   easily.  Also, things appear to work a bit better in back-to-back
117   configurations if the Rx ring is 8 times the size of the Tx ring
118*/
119#define TX_RING_SIZE	64
120#define RX_RING_SIZE	512
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct hamachi_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct hamachi_desc))
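/* With power-of-two ring sizes, the index arithmetic used throughout the
 * driver, for example
 *
 *	int entry = hmp->cur_tx % TX_RING_SIZE;
 *
 * reduces to a simple bit mask (cur_tx & (TX_RING_SIZE - 1)) because the
 * ring indices are unsigned. */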
123
124/*
125 * Enable netdev_ioctl.  Added interrupt coalescing parameter adjustment.
126 * 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov>
127 */
128
129/* play with 64-bit addrlen; seems to be a teensy bit slower  --pw */
130/* #define ADDRLEN 64 */
131
132/*
 * RX_CHECKSUM turns on card-generated receive checksums for
 *   TCP and UDP packets.  Otherwise the upper layers do the calculation.
135 * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
136 */
137#define RX_CHECKSUM
138
139/* Operational parameters that usually are not changed. */
140/* Time in jiffies before concluding the transmitter is hung. */
141#define TX_TIMEOUT  (5*HZ)
142
143#include <linux/capability.h>
144#include <linux/module.h>
145#include <linux/kernel.h>
146#include <linux/string.h>
147#include <linux/timer.h>
148#include <linux/time.h>
149#include <linux/errno.h>
150#include <linux/ioport.h>
151#include <linux/interrupt.h>
152#include <linux/pci.h>
153#include <linux/init.h>
154#include <linux/ethtool.h>
155#include <linux/mii.h>
156#include <linux/netdevice.h>
157#include <linux/etherdevice.h>
158#include <linux/skbuff.h>
159#include <linux/ip.h>
160#include <linux/delay.h>
161#include <linux/bitops.h>
162
163#include <asm/uaccess.h>
164#include <asm/processor.h>	/* Processor type for cache alignment. */
165#include <asm/io.h>
166#include <asm/unaligned.h>
167#include <asm/cache.h>
168
169static const char version[] =
170KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
171"   Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
172"   Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
173
174
175/* IP_MF appears to be only defined in <netinet/ip.h>, however,
176   we need it for hardware checksumming support.  FYI... some of
177   the definitions in <netinet/ip.h> conflict/duplicate those in
178   other linux headers causing many compiler warnings.
179*/
180#ifndef IP_MF
181  #define IP_MF 0x2000   /* IP more frags from <netinet/ip.h> */
182#endif
183
184/* Define IP_OFFSET to be IPOPT_OFFSET */
185#ifndef IP_OFFSET
186  #ifdef IPOPT_OFFSET
187    #define IP_OFFSET IPOPT_OFFSET
188  #else
189    #define IP_OFFSET 2
190  #endif
191#endif
192
193#define RUN_AT(x) (jiffies + (x))
194
195#ifndef ADDRLEN
196#define ADDRLEN 32
197#endif
198
199/* Condensed bus+endian portability operations. */
200#if ADDRLEN == 64
201#define cpu_to_leXX(addr)	cpu_to_le64(addr)
202#define leXX_to_cpu(addr)	le64_to_cpu(addr)
203#else
204#define cpu_to_leXX(addr)	cpu_to_le32(addr)
205#define leXX_to_cpu(addr)	le32_to_cpu(addr)
206#endif
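/* These wrappers are used wherever a DMA address is stored in a descriptor,
 * so the same code handles both descriptor layouts, e.g. (from the Rx refill
 * path below):
 *
 *	desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data,
 *				hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 */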
207
208
209/*
210				Theory of Operation
211
212I. Board Compatibility
213
214This device driver is designed for the Packet Engines "Hamachi"
215Gigabit Ethernet chip.  The only PCA currently supported is the GNIC-II 64-bit
66 MHz PCI card.
217
218II. Board-specific settings
219
220No jumpers exist on the board.  The chip supports software correction of
221various motherboard wiring errors, however this driver does not support
222that feature.
223
224III. Driver operation
225
226IIIa. Ring buffers
227
228The Hamachi uses a typical descriptor based bus-master architecture.
229The descriptor list is similar to that used by the Digital Tulip.
230This driver uses two statically allocated fixed-size descriptor lists
231formed into rings by a branch from the final descriptor to the beginning of
232the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
233
This driver uses a zero-copy receive and transmit scheme similar to my other
235network drivers.
236The driver allocates full frame size skbuffs for the Rx ring buffers at
237open() time and passes the skb->data field to the Hamachi as receive data
238buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
239a fresh skbuff is allocated and the frame is copied to the new skbuff.
240When the incoming frame is larger, the skbuff is passed directly up the
241protocol stack and replaced by a newly allocated skbuff.
242
243The RX_COPYBREAK value is chosen to trade-off the memory wasted by
244using a full-sized skbuff for small frames vs. the copying costs of larger
245frames.  Gigabit cards are typically used on generously configured machines
246and the underfilled buffers have negligible impact compared to the benefit of
247a single allocation size, so the default value of zero results in never
248copying packets.
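
In outline (simplified from hamachi_rx() below), the receive path makes the
copy-versus-pass-up decision roughly as follows:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		copy the frame into the fresh small skb and keep the ring buffer;
	else
		unmap the ring buffer, pass that skb up, and refill the slot later;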
249
250IIIb/c. Transmit/Receive Structure
251
The Rx and Tx descriptor structures are straightforward, with no historical
253baggage that must be explained.  Unlike the awkward DBDMA structure, there
254are no unused fields or option bits that had only one allowable setting.
255
256Two details should be noted about the descriptors: The chip supports both 32
257bit and 64 bit address structures, and the length field is overwritten on
258the receive descriptors.  The descriptor length is set in the control word
259for each channel. The development driver uses 32 bit addresses only, however
26064 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
261
262IIId. Synchronization
263
264This driver is very similar to my other network drivers.
265The driver runs as two independent, single-threaded flows of control.  One
266is the send-packet routine, which enforces single-threaded use by the
267dev->tbusy flag.  The other thread is the interrupt handler, which is single
268threaded by the hardware and other software.
269
270The send packet thread has partial control over the Tx ring and 'dev->tbusy'
271flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
273the 'hmp->tx_full' flag.
274
275The interrupt handler has exclusive control over the Rx ring and records stats
276from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
277empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
278clears both the tx_full and tbusy flags.
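
Concretely (simplified from hamachi_start_xmit() below), the two indices only
ever increase and their difference is the number of outstanding descriptors:

	entry = hmp->cur_tx % TX_RING_SIZE;
	if (hmp->cur_tx - hmp->dirty_tx >= TX_RING_SIZE - 4) {
		hmp->tx_full = 1;
		netif_stop_queue(dev);
	}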
279
280IV. Notes
281
282Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
283
284IVb. References
285
286Hamachi Engineering Design Specification, 5/15/97
287(Note: This version was marked "Confidential".)
288
289IVc. Errata
290
291None noted.
292
293V.  Recent Changes
294
29501/15/1999 EPK  Enlargement of the TX and RX ring sizes.  This appears
296    to help avoid some stall conditions -- this needs further research.
297
29801/15/1999 EPK  Creation of the hamachi_tx function.  This function cleans
299    the Tx ring and is called from hamachi_start_xmit (this used to be
300    called from hamachi_interrupt but it tends to delay execution of the
301    interrupt handler and thus reduce bandwidth by reducing the latency
302    between hamachi_rx()'s).  Notably, some modification has been made so
303    that the cleaning loop checks only to make sure that the DescOwn bit
304    isn't set in the status flag since the card is not required
305    to set the entire flag to zero after processing.
306
30701/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
308    checked before attempting to add a buffer to the ring.  If the ring is full
309    an attempt is made to free any dirty buffers and thus find space for
    the new buffer or the function returns non-zero which should cause the
311    scheduler to reschedule the buffer later.
312
31301/15/1999 EPK Some adjustments were made to the chip initialization.
314    End-to-end flow control should now be fully active and the interrupt
315    algorithm vars have been changed.  These could probably use further tuning.
316
31701/15/1999 EPK Added the max_{rx,tx}_latency options.  These are used to
318    set the rx and tx latencies for the Hamachi interrupts. If you're having
319    problems with network stalls, try setting these to higher values.
320    Valid values are 0x00 through 0xff.
321
32201/15/1999 EPK In general, the overall bandwidth has increased and
323    latencies are better (sometimes by a factor of 2).  Stalls are rare at
324    this point, however there still appears to be a bug somewhere between the
325    hardware and driver.  TCP checksum errors under load also appear to be
326    eliminated at this point.
327
32801/18/1999 EPK Ensured that the DescEndRing bit was being set on both the
329    Rx and Tx rings.  This appears to have been affecting whether a particular
330    peer-to-peer connection would hang under high load.  I believe the Rx
    ring was typically getting set correctly, but the Tx ring wasn't getting
332    the DescEndRing bit set during initialization. ??? Does this mean the
333    hamachi card is using the DescEndRing in processing even if a particular
334    slot isn't in use -- hypothetically, the card might be searching the
335    entire Tx ring for slots with the DescOwn bit set and then processing
336    them.  If the DescEndRing bit isn't set, then it might just wander off
337    through memory until it hits a chunk of data with that bit set
    and then loops back.
339
02/09/1999 EPK Addressed Michel Mueller's TxDMA Interrupt and Tx-timeout
    problem (TxCmd and RxCmd need only be set when idle or stopped).
342
34302/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
344    (Michel Mueller pointed out the ``permanently busy'' potential
345    problem here).
346
34702/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
348
02/23/1999 EPK Verified that the interrupt status field bits for Tx were
    incorrectly defined and corrected them (as per Michel Mueller).
351
35202/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
353    were available before resetting the tbusy and tx_full flags
354    (as per Michel Mueller).
355
35603/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
357
35812/31/1999 KDU Cleaned up assorted things and added Don's code to force
35932 bit.
360
36102/20/2000 KDU Some of the control was just plain odd.  Cleaned up the
362hamachi_start_xmit() and hamachi_interrupt() code.  There is still some
363re-structuring I would like to do.
364
36503/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
366parameters on a dual P3-450 setup yielded the new default interrupt
367mitigation parameters.  Tx should interrupt VERY infrequently due to
368Eric's scheme.  Rx should be more often...
369
37003/13/2000 KDU Added a patch to make the Rx Checksum code interact
371nicely with non-linux machines.
372
37303/13/2000 KDU Experimented with some of the configuration values:
374
375	-It seems that enabling PCI performance commands for descriptors
376	(changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
377	performance impact for any of my tests. (ttcp, netpipe, netperf)  I will
378	leave them that way until I hear further feedback.
379
380	-Increasing the PCI_LATENCY_TIMER to 130
381	(2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
382	degrade performance.  Leaving default at 64 pending further information.
383
38403/14/2000 KDU Further tuning:
385
386	-adjusted boguscnt in hamachi_rx() to depend on interrupt
387	mitigation parameters chosen.
388
389	-Selected a set of interrupt parameters based on some extensive testing.
390	These may change with more testing.
391
392TO DO:
393
394-Consider borrowing from the acenic driver code to check PCI_COMMAND for
395PCI_COMMAND_INVALIDATE.  Set maximum burst size to cache line size in
396that case.
397
398-fix the reset procedure.  It doesn't quite work.
399*/
400
401/* A few values that may be tweaked. */
402/* Size of each temporary Rx buffer, calculated as:
403 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
404 * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
405 */
406#define PKT_BUF_SZ		1536
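/* The total above works out as 1518 + 2 + 8 + 8 == 1536, so PKT_BUF_SZ
 * already includes the alignment pad, the status word and the Rx checksum. */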
407
408/* For now, this is going to be set to the maximum size of an ethernet
409 * packet.  Eventually, we may want to make it a variable that is
410 * related to the MTU
411 */
412#define MAX_FRAME_SIZE  1518
413
414/* The rest of these values should never change. */
415
416static void hamachi_timer(unsigned long data);
417
418enum capability_flags {CanHaveMII=1, };
419static const struct chip_info {
420	u16	vendor_id, device_id, device_id_mask, pad;
421	const char *name;
422	void (*media_timer)(unsigned long data);
423	int flags;
424} chip_tbl[] = {
425	{0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
426	{0,},
427};
428
429/* Offsets to the Hamachi registers.  Various sizes. */
430enum hamachi_offsets {
431	TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
432	RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
433	PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
434	LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E,
435	TxChecksum=0x074, RxChecksum=0x076,
436	TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
437	InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
438	EventStatus=0x08C,
439	MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
440	/* See enum MII_offsets below. */
441	MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
442	AddrMode=0x0D0, StationAddr=0x0D2,
443	/* Gigabit AutoNegotiation. */
444	ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
445	ANLinkPartnerAbility=0x0EA,
446	EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
447	FIFOcfg=0x0F8,
448};
449
450/* Offsets to the MII-mode registers. */
451enum MII_offsets {
452	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
453	MII_Status=0xAE,
454};
455
456/* Bits in the interrupt status/mask registers. */
457enum intr_status_bits {
458	IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
459	IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
460	LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
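/* The interrupt handler below fetches pending events by reading the
 * InterruptClear register (rather than IntrStatus), which presumably
 * acknowledges them in the same access:
 *
 *	u32 intr_status = readl(ioaddr + InterruptClear);
 *	if (intr_status & IntrRxDone)
 *		hamachi_rx(dev);
 */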
461
462/* The Hamachi Rx and Tx buffer descriptors. */
463struct hamachi_desc {
464	__le32 status_n_length;
465#if ADDRLEN == 64
466	u32 pad;
467	__le64 addr;
468#else
469	__le32 addr;
470#endif
471};
472
473/* Bits in hamachi_desc.status_n_length */
474enum desc_status_bits {
475	DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
476	DescIntr=0x10000000,
477};
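/* Example (from hamachi_init_ring() below): a freshly prepared Rx descriptor
 * packs the ownership/status bits and the usable buffer length into the
 * single status_n_length word:
 *
 *	desc->status_n_length = cpu_to_le32(DescOwn | DescEndPacket |
 *			DescIntr | (hmp->rx_buf_sz - 2));
 */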
478
479#define PRIV_ALIGN	15  			/* Required alignment mask */
480#define MII_CNT		4
481struct hamachi_private {
482	/* Descriptor rings first for alignment.  Tx requires a second descriptor
483	   for status. */
484	struct hamachi_desc *rx_ring;
485	struct hamachi_desc *tx_ring;
486	struct sk_buff* rx_skbuff[RX_RING_SIZE];
487	struct sk_buff* tx_skbuff[TX_RING_SIZE];
488	dma_addr_t tx_ring_dma;
489	dma_addr_t rx_ring_dma;
490	struct timer_list timer;		/* Media selection timer. */
491	/* Frequently used and paired value: keep adjacent for cache effect. */
492	spinlock_t lock;
493	int chip_id;
494	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
495	unsigned int cur_tx, dirty_tx;
496	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
497	unsigned int tx_full:1;			/* The Tx queue is full. */
498	unsigned int duplex_lock:1;
499	unsigned int default_port:4;		/* Last dev->if_port value. */
500	/* MII transceiver section. */
501	int mii_cnt;								/* MII device addresses. */
502	struct mii_if_info mii_if;		/* MII lib hooks/info */
503	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
504	u32 rx_int_var, tx_int_var;	/* interrupt control variables */
505	u32 option;							/* Hold on to a copy of the options */
506	struct pci_dev *pci_dev;
507	void __iomem *base;
508};
509
510MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>");
511MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
512MODULE_LICENSE("GPL");
513
514module_param(max_interrupt_work, int, 0);
515module_param(mtu, int, 0);
516module_param(debug, int, 0);
517module_param(min_rx_pkt, int, 0);
518module_param(max_rx_gap, int, 0);
519module_param(max_rx_latency, int, 0);
520module_param(min_tx_pkt, int, 0);
521module_param(max_tx_gap, int, 0);
522module_param(max_tx_latency, int, 0);
523module_param(rx_copybreak, int, 0);
524module_param_array(rx_params, int, NULL, 0);
525module_param_array(tx_params, int, NULL, 0);
526module_param_array(options, int, NULL, 0);
527module_param_array(full_duplex, int, NULL, 0);
528module_param(force32, int, 0);
529MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
530MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
531MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
532MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
533MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
534MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
535MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
536MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
537MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
538MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
539MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
540MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9: full duplex");
542MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
543MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
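/* Example invocation (illustrative values only); the per-board array
 * parameters take comma-separated values, one entry per adapter:
 *
 *	modprobe hamachi debug=2 rx_copybreak=100 rx_params=0x00180511,0x00180511
 */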
544
545static int read_eeprom(void __iomem *ioaddr, int location);
546static int mdio_read(struct net_device *dev, int phy_id, int location);
547static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
548static int hamachi_open(struct net_device *dev);
549static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
550static void hamachi_timer(unsigned long data);
551static void hamachi_tx_timeout(struct net_device *dev);
552static void hamachi_init_ring(struct net_device *dev);
553static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
554				      struct net_device *dev);
555static irqreturn_t hamachi_interrupt(int irq, void *dev_instance);
556static int hamachi_rx(struct net_device *dev);
557static inline int hamachi_tx(struct net_device *dev);
558static void hamachi_error(struct net_device *dev, int intr_status);
559static int hamachi_close(struct net_device *dev);
560static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
561static void set_rx_mode(struct net_device *dev);
562static const struct ethtool_ops ethtool_ops;
563static const struct ethtool_ops ethtool_ops_no_mii;
564
565static const struct net_device_ops hamachi_netdev_ops = {
566	.ndo_open		= hamachi_open,
567	.ndo_stop		= hamachi_close,
568	.ndo_start_xmit		= hamachi_start_xmit,
569	.ndo_get_stats		= hamachi_get_stats,
570	.ndo_set_rx_mode	= set_rx_mode,
571	.ndo_change_mtu		= eth_change_mtu,
572	.ndo_validate_addr	= eth_validate_addr,
573	.ndo_set_mac_address 	= eth_mac_addr,
574	.ndo_tx_timeout		= hamachi_tx_timeout,
575	.ndo_do_ioctl		= netdev_ioctl,
576};
577
578
579static int hamachi_init_one(struct pci_dev *pdev,
580			    const struct pci_device_id *ent)
581{
582	struct hamachi_private *hmp;
583	int option, i, rx_int_var, tx_int_var, boguscnt;
584	int chip_id = ent->driver_data;
585	int irq;
586	void __iomem *ioaddr;
587	unsigned long base;
588	static int card_idx;
589	struct net_device *dev;
590	void *ring_space;
591	dma_addr_t ring_dma;
592	int ret = -ENOMEM;
593
594/* when built into the kernel, we only print version if device is found */
595#ifndef MODULE
596	static int printed_version;
597	if (!printed_version++)
598		printk(version);
599#endif
600
601	if (pci_enable_device(pdev)) {
602		ret = -EIO;
603		goto err_out;
604	}
605
606	base = pci_resource_start(pdev, 0);
607#ifdef __alpha__				/* Really "64 bit addrs" */
608	base |= (pci_resource_start(pdev, 1) << 32);
609#endif
610
611	pci_set_master(pdev);
612
613	i = pci_request_regions(pdev, DRV_NAME);
614	if (i)
615		return i;
616
617	irq = pdev->irq;
618	ioaddr = ioremap(base, 0x400);
619	if (!ioaddr)
620		goto err_out_release;
621
622	dev = alloc_etherdev(sizeof(struct hamachi_private));
623	if (!dev)
624		goto err_out_iounmap;
625
626	SET_NETDEV_DEV(dev, &pdev->dev);
627
	/* The MAC address always comes from the EEPROM here; the readb() arm of
	   the conditional is dead code kept only to document the register
	   alternative. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
			: readb(ioaddr + StationAddr + i);
631
632#if ! defined(final_version)
633	if (hamachi_debug > 4)
634		for (i = 0; i < 0x10; i++)
635			printk("%2.2x%s",
636				   read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n");
637#endif
638
639	hmp = netdev_priv(dev);
640	spin_lock_init(&hmp->lock);
641
642	hmp->mii_if.dev = dev;
643	hmp->mii_if.mdio_read = mdio_read;
644	hmp->mii_if.mdio_write = mdio_write;
645	hmp->mii_if.phy_id_mask = 0x1f;
646	hmp->mii_if.reg_num_mask = 0x1f;
647
648	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
649	if (!ring_space)
650		goto err_out_cleardev;
651	hmp->tx_ring = ring_space;
652	hmp->tx_ring_dma = ring_dma;
653
654	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
655	if (!ring_space)
656		goto err_out_unmap_tx;
657	hmp->rx_ring = ring_space;
658	hmp->rx_ring_dma = ring_dma;
659
660	/* Check for options being passed in */
661	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
662	if (dev->mem_start)
663		option = dev->mem_start;
664
665	/* If the bus size is misidentified, do the following. */
666	force32 = force32 ? force32 :
667		((option  >= 0) ? ((option & 0x00000070) >> 4) : 0 );
668	if (force32)
669		writeb(force32, ioaddr + VirtualJumpers);
670
671	/* Hmmm, do we really need to reset the chip???. */
672	writeb(0x01, ioaddr + ChipReset);
673
674	/* After a reset, the clock speed measurement of the PCI bus will not
675	 * be valid for a moment.  Wait for a little while until it is.  If
676	 * it takes more than 10ms, forget it.
677	 */
678	udelay(10);
679	i = readb(ioaddr + PCIClkMeas);
680	for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
681		udelay(10);
682		i = readb(ioaddr + PCIClkMeas);
683	}
684
685	hmp->base = ioaddr;
686	pci_set_drvdata(pdev, dev);
687
688	hmp->chip_id = chip_id;
689	hmp->pci_dev = pdev;
690
691	/* The lower four bits are the media type. */
692	if (option > 0) {
693		hmp->option = option;
694		if (option & 0x200)
695			hmp->mii_if.full_duplex = 1;
696		else if (option & 0x080)
697			hmp->mii_if.full_duplex = 0;
698		hmp->default_port = option & 15;
699		if (hmp->default_port)
700			hmp->mii_if.force_media = 1;
701	}
702	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
703		hmp->mii_if.full_duplex = 1;
704
705	/* lock the duplex mode if someone specified a value */
706	if (hmp->mii_if.full_duplex || (option & 0x080))
707		hmp->duplex_lock = 1;
708
709	/* Set interrupt tuning parameters */
710	max_rx_latency = max_rx_latency & 0x00ff;
711	max_rx_gap = max_rx_gap & 0x00ff;
712	min_rx_pkt = min_rx_pkt & 0x00ff;
713	max_tx_latency = max_tx_latency & 0x00ff;
714	max_tx_gap = max_tx_gap & 0x00ff;
715	min_tx_pkt = min_tx_pkt & 0x00ff;
716
717	rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
718	tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
719	hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
720		(min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
721	hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
722		(min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
723
724
725	/* The Hamachi-specific entries in the device structure. */
726	dev->netdev_ops = &hamachi_netdev_ops;
727	dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
728		&ethtool_ops : &ethtool_ops_no_mii;
729	dev->watchdog_timeo = TX_TIMEOUT;
730	if (mtu)
731		dev->mtu = mtu;
732
733	i = register_netdev(dev);
734	if (i) {
735		ret = i;
736		goto err_out_unmap_rx;
737	}
738
739	printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n",
740		   dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
741		   ioaddr, dev->dev_addr, irq);
742	i = readb(ioaddr + PCIClkMeas);
	printk(KERN_INFO "%s:  %d-bit %d MHz PCI bus (%d), Virtual Jumpers "
744		   "%2.2x, LPA %4.4x.\n",
745		   dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
746		   i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
747		   readw(ioaddr + ANLinkPartnerAbility));
748
749	if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
750		int phy, phy_idx = 0;
751		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
752			int mii_status = mdio_read(dev, phy, MII_BMSR);
753			if (mii_status != 0xffff  &&
754				mii_status != 0x0000) {
755				hmp->phys[phy_idx++] = phy;
756				hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
757				printk(KERN_INFO "%s: MII PHY found at address %d, status "
758					   "0x%4.4x advertising %4.4x.\n",
759					   dev->name, phy, mii_status, hmp->mii_if.advertising);
760			}
761		}
762		hmp->mii_cnt = phy_idx;
763		if (hmp->mii_cnt > 0)
764			hmp->mii_if.phy_id = hmp->phys[0];
765		else
766			memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
767	}
768	/* Configure gigabit autonegotiation. */
769	writew(0x0400, ioaddr + ANXchngCtrl);	/* Enable legacy links. */
770	writew(0x08e0, ioaddr + ANAdvertise);	/* Set our advertise word. */
771	writew(0x1000, ioaddr + ANCtrl);			/* Enable negotiation */
772
773	card_idx++;
774	return 0;
775
776err_out_unmap_rx:
777	pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
778		hmp->rx_ring_dma);
779err_out_unmap_tx:
780	pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
781		hmp->tx_ring_dma);
782err_out_cleardev:
783	free_netdev (dev);
784err_out_iounmap:
785	iounmap(ioaddr);
786err_out_release:
787	pci_release_regions(pdev);
788err_out:
789	return ret;
790}
791
792static int read_eeprom(void __iomem *ioaddr, int location)
793{
794	int bogus_cnt = 1000;
795
796	/* We should check busy first - per docs -KDU */
797	while ((readb(ioaddr + EECmdStatus) & 0x40)  && --bogus_cnt > 0);
798	writew(location, ioaddr + EEAddr);
799	writeb(0x02, ioaddr + EECmdStatus);
800	bogus_cnt = 1000;
801	while ((readb(ioaddr + EECmdStatus) & 0x40)  && --bogus_cnt > 0);
802	if (hamachi_debug > 5)
803		printk("   EEPROM status is %2.2x after %d ticks.\n",
804			   (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
805	return readb(ioaddr + EEData);
806}
807
/* MII Management Data I/O accesses.
809   These routines assume the MDIO controller is idle, and do not exit until
810   the command is finished. */
811
812static int mdio_read(struct net_device *dev, int phy_id, int location)
813{
814	struct hamachi_private *hmp = netdev_priv(dev);
815	void __iomem *ioaddr = hmp->base;
816	int i;
817
818	/* We should check busy first - per docs -KDU */
819	for (i = 10000; i >= 0; i--)
820		if ((readw(ioaddr + MII_Status) & 1) == 0)
821			break;
822	writew((phy_id<<8) + location, ioaddr + MII_Addr);
823	writew(0x0001, ioaddr + MII_Cmd);
824	for (i = 10000; i >= 0; i--)
825		if ((readw(ioaddr + MII_Status) & 1) == 0)
826			break;
827	return readw(ioaddr + MII_Rd_Data);
828}
829
830static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
831{
832	struct hamachi_private *hmp = netdev_priv(dev);
833	void __iomem *ioaddr = hmp->base;
834	int i;
835
836	/* We should check busy first - per docs -KDU */
837	for (i = 10000; i >= 0; i--)
838		if ((readw(ioaddr + MII_Status) & 1) == 0)
839			break;
840	writew((phy_id<<8) + location, ioaddr + MII_Addr);
841	writew(value, ioaddr + MII_Wr_Data);
842
843	/* Wait for the command to finish. */
844	for (i = 10000; i >= 0; i--)
845		if ((readw(ioaddr + MII_Status) & 1) == 0)
846			break;
847}
848
849
850static int hamachi_open(struct net_device *dev)
851{
852	struct hamachi_private *hmp = netdev_priv(dev);
853	void __iomem *ioaddr = hmp->base;
854	int i;
855	u32 rx_int_var, tx_int_var;
856	u16 fifo_info;
857
858	i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
859			dev->name, dev);
860	if (i)
861		return i;
862
863	hamachi_init_ring(dev);
864
865#if ADDRLEN == 64
866	/* writellll anyone ? */
867	writel(hmp->rx_ring_dma, ioaddr + RxPtr);
868	writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4);
869	writel(hmp->tx_ring_dma, ioaddr + TxPtr);
870	writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4);
871#else
872	writel(hmp->rx_ring_dma, ioaddr + RxPtr);
873	writel(hmp->tx_ring_dma, ioaddr + TxPtr);
874#endif
875
876	/* TODO:  It would make sense to organize this as words since the card
877	 * documentation does. -KDU
878	 */
879	for (i = 0; i < 6; i++)
880		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
881
	/* Initialize other registers: with so many, this will eventually be
	   converted to an offset/value list. */
884
885	/* Configure the FIFO */
886	fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
887	switch (fifo_info){
888		case 0 :
889			/* No FIFO */
890			writew(0x0000, ioaddr + FIFOcfg);
891			break;
892		case 1 :
893			/* Configure the FIFO for 512K external, 16K used for Tx. */
894			writew(0x0028, ioaddr + FIFOcfg);
895			break;
896		case 2 :
897			/* Configure the FIFO for 1024 external, 32K used for Tx. */
898			writew(0x004C, ioaddr + FIFOcfg);
899			break;
900		case 3 :
901			/* Configure the FIFO for 2048 external, 32K used for Tx. */
902			writew(0x006C, ioaddr + FIFOcfg);
903			break;
904		default :
905			printk(KERN_WARNING "%s:  Unsupported external memory config!\n",
906				dev->name);
907			/* Default to no FIFO */
908			writew(0x0000, ioaddr + FIFOcfg);
909			break;
910	}
911
912	if (dev->if_port == 0)
913		dev->if_port = hmp->default_port;
914
915
916	/* Setting the Rx mode will start the Rx process. */
917	/* If someone didn't choose a duplex, default to full-duplex */
918	if (hmp->duplex_lock != 1)
919		hmp->mii_if.full_duplex = 1;
920
921	/* always 1, takes no more time to do it */
922	writew(0x0001, ioaddr + RxChecksum);
923	writew(0x0000, ioaddr + TxChecksum);
924	writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
925	writew(0x215F, ioaddr + MACCnfg);
926	writew(0x000C, ioaddr + FrameGap0);
927	/* WHAT?!?!?  Why isn't this documented somewhere? -KDU */
928	writew(0x1018, ioaddr + FrameGap1);
929	/* Why do we enable receives/transmits here? -KDU */
930	writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
931	/* Enable automatic generation of flow control frames, period 0xffff. */
932	writel(0x0030FFFF, ioaddr + FlowCtrl);
933	writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); 	/* dev->mtu+14 ??? */
934
935	/* Enable legacy links. */
936	writew(0x0400, ioaddr + ANXchngCtrl);	/* Enable legacy links. */
937	/* Initial Link LED to blinking red. */
938	writeb(0x03, ioaddr + LEDCtrl);
939
940	/* Configure interrupt mitigation.  This has a great effect on
	   performance, so system tuning should start here! */
942
943	rx_int_var = hmp->rx_int_var;
944	tx_int_var = hmp->tx_int_var;
945
946	if (hamachi_debug > 1) {
947		printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
948			tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
949			(tx_int_var & 0x00ff0000) >> 16);
950		printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
951			rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
952			(rx_int_var & 0x00ff0000) >> 16);
953		printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
954	}
955
956	writel(tx_int_var, ioaddr + TxIntrCtrl);
957	writel(rx_int_var, ioaddr + RxIntrCtrl);
958
959	set_rx_mode(dev);
960
961	netif_start_queue(dev);
962
963	/* Enable interrupts by setting the interrupt mask. */
964	writel(0x80878787, ioaddr + InterruptEnable);
965	writew(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */
966
967	/* Configure and start the DMA channels. */
968	/* Burst sizes are in the low three bits: size = 4<<(val&7) */
969#if ADDRLEN == 64
970	writew(0x005D, ioaddr + RxDMACtrl); 		/* 128 dword bursts */
971	writew(0x005D, ioaddr + TxDMACtrl);
972#else
973	writew(0x001D, ioaddr + RxDMACtrl);
974	writew(0x001D, ioaddr + TxDMACtrl);
975#endif
976	writew(0x0001, ioaddr + RxCmd);
977
978	if (hamachi_debug > 2) {
979		printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
980			   dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
981	}
982	/* Set the timer to check for link beat. */
983	init_timer(&hmp->timer);
984	hmp->timer.expires = RUN_AT((24*HZ)/10);			/* 2.4 sec. */
985	hmp->timer.data = (unsigned long)dev;
986	hmp->timer.function = hamachi_timer;				/* timer handler */
987	add_timer(&hmp->timer);
988
989	return 0;
990}
991
992static inline int hamachi_tx(struct net_device *dev)
993{
994	struct hamachi_private *hmp = netdev_priv(dev);
995
996	/* Update the dirty pointer until we find an entry that is
997		still owned by the card */
998	for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
999		int entry = hmp->dirty_tx % TX_RING_SIZE;
1000		struct sk_buff *skb;
1001
1002		if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
1003			break;
1004		/* Free the original skb. */
1005		skb = hmp->tx_skbuff[entry];
1006		if (skb) {
1007			pci_unmap_single(hmp->pci_dev,
1008				leXX_to_cpu(hmp->tx_ring[entry].addr),
1009				skb->len, PCI_DMA_TODEVICE);
1010			dev_kfree_skb(skb);
1011			hmp->tx_skbuff[entry] = NULL;
1012		}
1013		hmp->tx_ring[entry].status_n_length = 0;
1014		if (entry >= TX_RING_SIZE-1)
1015			hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1016				cpu_to_le32(DescEndRing);
1017		dev->stats.tx_packets++;
1018	}
1019
1020	return 0;
1021}
1022
1023static void hamachi_timer(unsigned long data)
1024{
1025	struct net_device *dev = (struct net_device *)data;
1026	struct hamachi_private *hmp = netdev_priv(dev);
1027	void __iomem *ioaddr = hmp->base;
1028	int next_tick = 10*HZ;
1029
1030	if (hamachi_debug > 2) {
1031		printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
1032			   "%4.4x.\n", dev->name, readw(ioaddr + ANStatus),
1033			   readw(ioaddr + ANLinkPartnerAbility));
1034		printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
1035		       "%4.4x %4.4x %4.4x.\n", dev->name,
1036		       readw(ioaddr + 0x0e0),
1037		       readw(ioaddr + 0x0e2),
1038		       readw(ioaddr + 0x0e4),
1039		       readw(ioaddr + 0x0e6),
1040		       readw(ioaddr + 0x0e8),
1041		       readw(ioaddr + 0x0eA));
1042	}
1043	/* We could do something here... nah. */
1044	hmp->timer.expires = RUN_AT(next_tick);
1045	add_timer(&hmp->timer);
1046}
1047
1048static void hamachi_tx_timeout(struct net_device *dev)
1049{
1050	int i;
1051	struct hamachi_private *hmp = netdev_priv(dev);
1052	void __iomem *ioaddr = hmp->base;
1053
1054	printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
1055		   " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
1056
1057	{
1058		printk(KERN_DEBUG "  Rx ring %p: ", hmp->rx_ring);
1059		for (i = 0; i < RX_RING_SIZE; i++)
1060			printk(KERN_CONT " %8.8x",
1061			       le32_to_cpu(hmp->rx_ring[i].status_n_length));
1062		printk(KERN_CONT "\n");
1063		printk(KERN_DEBUG"  Tx ring %p: ", hmp->tx_ring);
1064		for (i = 0; i < TX_RING_SIZE; i++)
1065			printk(KERN_CONT " %4.4x",
1066			       le32_to_cpu(hmp->tx_ring[i].status_n_length));
1067		printk(KERN_CONT "\n");
1068	}
1069
1070	/* Reinit the hardware and make sure the Rx and Tx processes
1071		are up and running.
1072	 */
1073	dev->if_port = 0;
1074	/* The right way to do Reset. -KDU
1075	 *		-Clear OWN bit in all Rx/Tx descriptors
1076	 *		-Wait 50 uS for channels to go idle
1077	 *		-Turn off MAC receiver
1078	 *		-Issue Reset
1079	 */
1080
1081	for (i = 0; i < RX_RING_SIZE; i++)
1082		hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
1083
1084	/* Presume that all packets in the Tx queue are gone if we have to
1085	 * re-init the hardware.
1086	 */
1087	for (i = 0; i < TX_RING_SIZE; i++){
1088		struct sk_buff *skb;
1089
1090		if (i >= TX_RING_SIZE - 1)
1091			hmp->tx_ring[i].status_n_length =
1092				cpu_to_le32(DescEndRing) |
1093				(hmp->tx_ring[i].status_n_length &
1094				 cpu_to_le32(0x0000ffff));
1095		else
1096			hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
1097		skb = hmp->tx_skbuff[i];
1098		if (skb){
1099			pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
1100				skb->len, PCI_DMA_TODEVICE);
1101			dev_kfree_skb(skb);
1102			hmp->tx_skbuff[i] = NULL;
1103		}
1104	}
1105
1106	udelay(60); /* Sleep 60 us just for safety sake */
1107	writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
1108
1109	writeb(0x01, ioaddr + ChipReset);  /* Reinit the hardware */
1110
1111	hmp->tx_full = 0;
1112	hmp->cur_rx = hmp->cur_tx = 0;
1113	hmp->dirty_rx = hmp->dirty_tx = 0;
1114	/* Rx packets are also presumed lost; however, we need to make sure a
	 * ring of buffers is intact. -KDU
1116	 */
1117	for (i = 0; i < RX_RING_SIZE; i++){
1118		struct sk_buff *skb = hmp->rx_skbuff[i];
1119
1120		if (skb){
1121			pci_unmap_single(hmp->pci_dev,
1122				leXX_to_cpu(hmp->rx_ring[i].addr),
1123				hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1124			dev_kfree_skb(skb);
1125			hmp->rx_skbuff[i] = NULL;
1126		}
1127	}
1128	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1129	for (i = 0; i < RX_RING_SIZE; i++) {
1130		struct sk_buff *skb;
1131
1132		skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
1133		hmp->rx_skbuff[i] = skb;
1134		if (skb == NULL)
1135			break;
1136
1137                hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1138			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1139		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1140			DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
1141	}
1142	hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1143	/* Mark the last entry as wrapping the ring. */
1144	hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1145
1146	/* Trigger an immediate transmit demand. */
1147	dev->trans_start = jiffies; /* prevent tx timeout */
1148	dev->stats.tx_errors++;
1149
1150	/* Restart the chip's Tx/Rx processes . */
1151	writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
1152	writew(0x0001, ioaddr + TxCmd); /* START Tx */
1153	writew(0x0001, ioaddr + RxCmd); /* START Rx */
1154
1155	netif_wake_queue(dev);
1156}
1157
1158
1159/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1160static void hamachi_init_ring(struct net_device *dev)
1161{
1162	struct hamachi_private *hmp = netdev_priv(dev);
1163	int i;
1164
1165	hmp->tx_full = 0;
1166	hmp->cur_rx = hmp->cur_tx = 0;
1167	hmp->dirty_rx = hmp->dirty_tx = 0;
1168
1169	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1170	 * card needs room to do 8 byte alignment, +2 so we can reserve
1171	 * the first 2 bytes, and +16 gets room for the status word from the
1172	 * card.  -KDU
1173	 */
1174	hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
1175		(((dev->mtu+26+7) & ~7) + 16));
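	/* Worked example: the default MTU of 1500 exceeds 1492, so the second
	 * form applies: ((1500 + 26 + 7) & ~7) + 16 == 1528 + 16 == 1544. */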
1176
1177	/* Initialize all Rx descriptors. */
1178	for (i = 0; i < RX_RING_SIZE; i++) {
1179		hmp->rx_ring[i].status_n_length = 0;
1180		hmp->rx_skbuff[i] = NULL;
1181	}
1182	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1183	for (i = 0; i < RX_RING_SIZE; i++) {
1184		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
1185		hmp->rx_skbuff[i] = skb;
1186		if (skb == NULL)
1187			break;
1188		skb_reserve(skb, 2); /* 16 byte align the IP header. */
1189                hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1190			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1191		/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
1192		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1193			DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
1194	}
1195	hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1196	hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1197
1198	for (i = 0; i < TX_RING_SIZE; i++) {
1199		hmp->tx_skbuff[i] = NULL;
1200		hmp->tx_ring[i].status_n_length = 0;
1201	}
1202	/* Mark the last entry of the ring */
1203	hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1204}
1205
1206
1207static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
1208				      struct net_device *dev)
1209{
1210	struct hamachi_private *hmp = netdev_priv(dev);
1211	unsigned entry;
1212	u16 status;
1213
1214	/* Ok, now make sure that the queue has space before trying to
		add another skbuff.  If we return non-zero the scheduler
		should interpret this as a full queue and requeue the buffer
1217		for later.
1218	 */
1219	if (hmp->tx_full) {
1220		/* We should NEVER reach this point -KDU */
1221		printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
1222
1223		/* Wake the potentially-idle transmit channel. */
1224		/* If we don't need to read status, DON'T -KDU */
1225		status=readw(hmp->base + TxStatus);
1226		if( !(status & 0x0001) || (status & 0x0002))
1227			writew(0x0001, hmp->base + TxCmd);
1228		return NETDEV_TX_BUSY;
1229	}
1230
1231	/* Caution: the write order is important here, set the field
1232	   with the "ownership" bits last. */
1233
1234	/* Calculate the next Tx descriptor entry. */
1235	entry = hmp->cur_tx % TX_RING_SIZE;
1236
1237	hmp->tx_skbuff[entry] = skb;
1238
1239        hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1240		skb->data, skb->len, PCI_DMA_TODEVICE));
1241
1242	/* Hmmmm, could probably put a DescIntr on these, but the way
1243		the driver is currently coded makes Tx interrupts unnecessary
1244		since the clearing of the Tx ring is handled by the start_xmit
1245		routine.  This organization helps mitigate the interrupts a
1246		bit and probably renders the max_tx_latency param useless.
1247
1248		Update: Putting a DescIntr bit on all of the descriptors and
1249		mitigating interrupt frequency with the tx_min_pkt parameter. -KDU
1250	*/
1251	if (entry >= TX_RING_SIZE-1)		 /* Wrap ring */
1252		hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
1253			DescEndPacket | DescEndRing | DescIntr | skb->len);
1254	else
1255		hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
1256			DescEndPacket | DescIntr | skb->len);
1257	hmp->cur_tx++;
1258
1259	/* Non-x86 Todo: explicitly flush cache lines here. */
1260
1261	/* Wake the potentially-idle transmit channel. */
1262	/* If we don't need to read status, DON'T -KDU */
1263	status=readw(hmp->base + TxStatus);
1264	if( !(status & 0x0001) || (status & 0x0002))
1265		writew(0x0001, hmp->base + TxCmd);
1266
1267	/* Immediately before returning, let's clear as many entries as we can. */
1268	hamachi_tx(dev);
1269
1270	/* We should kick the bottom half here, since we are not accepting
1271	 * interrupts with every packet.  i.e. realize that Gigabit ethernet
1272	 * can transmit faster than ordinary machines can load packets;
1273	 * hence, any packet that got put off because we were in the transmit
1274	 * routine should IMMEDIATELY get a chance to be re-queued. -KDU
1275	 */
1276	if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
1277		netif_wake_queue(dev);  /* Typical path */
1278	else {
1279		hmp->tx_full = 1;
1280		netif_stop_queue(dev);
1281	}
1282
1283	if (hamachi_debug > 4) {
1284		printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
1285			   dev->name, hmp->cur_tx, entry);
1286	}
1287	return NETDEV_TX_OK;
1288}
1289
1290/* The interrupt handler does all of the Rx thread work and cleans up
1291   after the Tx thread. */
1292static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
1293{
1294	struct net_device *dev = dev_instance;
1295	struct hamachi_private *hmp = netdev_priv(dev);
1296	void __iomem *ioaddr = hmp->base;
1297	long boguscnt = max_interrupt_work;
1298	int handled = 0;
1299
1300#ifndef final_version			/* Can never occur. */
1301	if (dev == NULL) {
1302		printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
1303		return IRQ_NONE;
1304	}
1305#endif
1306
1307	spin_lock(&hmp->lock);
1308
1309	do {
1310		u32 intr_status = readl(ioaddr + InterruptClear);
1311
1312		if (hamachi_debug > 4)
1313			printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
1314				   dev->name, intr_status);
1315
1316		if (intr_status == 0)
1317			break;
1318
1319		handled = 1;
1320
1321		if (intr_status & IntrRxDone)
1322			hamachi_rx(dev);
1323
1324		if (intr_status & IntrTxDone){
1325			/* This code should RARELY need to execute. After all, this is
1326			 * a gigabit link, it should consume packets as fast as we put
1327			 * them in AND we clear the Tx ring in hamachi_start_xmit().
1328			 */
1329			if (hmp->tx_full){
1330				for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
1331					int entry = hmp->dirty_tx % TX_RING_SIZE;
1332					struct sk_buff *skb;
1333
1334					if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
1335						break;
1336					skb = hmp->tx_skbuff[entry];
1337					/* Free the original skb. */
1338					if (skb){
1339						pci_unmap_single(hmp->pci_dev,
1340							leXX_to_cpu(hmp->tx_ring[entry].addr),
1341							skb->len,
1342							PCI_DMA_TODEVICE);
1343						dev_kfree_skb_irq(skb);
1344						hmp->tx_skbuff[entry] = NULL;
1345					}
1346					hmp->tx_ring[entry].status_n_length = 0;
1347					if (entry >= TX_RING_SIZE-1)
1348						hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1349							cpu_to_le32(DescEndRing);
1350					dev->stats.tx_packets++;
1351				}
1352				if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
1353					/* The ring is no longer full */
1354					hmp->tx_full = 0;
1355					netif_wake_queue(dev);
1356				}
1357			} else {
1358				netif_wake_queue(dev);
1359			}
1360		}
1361
1362
1363		/* Abnormal error summary/uncommon events handlers. */
1364		if (intr_status &
1365			(IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
1366			 LinkChange | NegotiationChange | StatsMax))
1367			hamachi_error(dev, intr_status);
1368
1369		if (--boguscnt < 0) {
1370			printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
1371				   dev->name, intr_status);
1372			break;
1373		}
1374	} while (1);
1375
1376	if (hamachi_debug > 3)
1377		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1378			   dev->name, readl(ioaddr + IntrStatus));
1379
1380#ifndef final_version
1381	/* Code that should never be run!  Perhaps remove after testing.. */
1382	{
1383		static int stopit = 10;
1384		if (dev->start == 0  &&  --stopit < 0) {
1385			printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
1386				   dev->name);
1387			free_irq(irq, dev);
1388		}
1389	}
1390#endif
1391
1392	spin_unlock(&hmp->lock);
1393	return IRQ_RETVAL(handled);
1394}
1395
1396/* This routine is logically part of the interrupt handler, but separated
1397   for clarity and better register allocation. */
1398static int hamachi_rx(struct net_device *dev)
1399{
1400	struct hamachi_private *hmp = netdev_priv(dev);
1401	int entry = hmp->cur_rx % RX_RING_SIZE;
1402	int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
1403
1404	if (hamachi_debug > 4) {
		printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
			   entry, le32_to_cpu(hmp->rx_ring[entry].status_n_length));
1407	}
1408
1409	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1410	while (1) {
1411		struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
1412		u32 desc_status = le32_to_cpu(desc->status_n_length);
1413		u16 data_size = desc_status;	/* Implicit truncate */
1414		u8 *buf_addr;
1415		s32 frame_status;
1416
1417		if (desc_status & DescOwn)
1418			break;
1419		pci_dma_sync_single_for_cpu(hmp->pci_dev,
1420					    leXX_to_cpu(desc->addr),
1421					    hmp->rx_buf_sz,
1422					    PCI_DMA_FROMDEVICE);
1423		buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
1424		frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
1425		if (hamachi_debug > 4)
1426			printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
1427				frame_status);
1428		if (--boguscnt < 0)
1429			break;
1430		if ( ! (desc_status & DescEndPacket)) {
1431			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1432				   "multiple buffers, entry %#x length %d status %4.4x!\n",
1433				   dev->name, hmp->cur_rx, data_size, desc_status);
1434			printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
1435				   dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
1436			printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
1437				   dev->name,
1438				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
1439				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
1440				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
1441			dev->stats.rx_length_errors++;
1442		} /* else  Omit for prototype errata??? */
1443		if (frame_status & 0x00380000) {
1444			/* There was an error. */
1445			if (hamachi_debug > 2)
1446				printk(KERN_DEBUG "  hamachi_rx() Rx error was %8.8x.\n",
1447					   frame_status);
1448			dev->stats.rx_errors++;
1449			if (frame_status & 0x00600000)
1450				dev->stats.rx_length_errors++;
1451			if (frame_status & 0x00080000)
1452				dev->stats.rx_frame_errors++;
1453			if (frame_status & 0x00100000)
1454				dev->stats.rx_crc_errors++;
1455			if (frame_status < 0)
1456				dev->stats.rx_dropped++;
1457		} else {
1458			struct sk_buff *skb;
1459			/* Omit CRC */
1460			u16 pkt_len = (frame_status & 0x07ff) - 4;
1461#ifdef RX_CHECKSUM
1462			u32 pfck = *(u32 *) &buf_addr[data_size - 8];
1463#endif
1464
1465
1466#ifndef final_version
1467			if (hamachi_debug > 4)
1468				printk(KERN_DEBUG "  hamachi_rx() normal Rx pkt length %d"
1469					   " of %d, bogus_cnt %d.\n",
1470					   pkt_len, data_size, boguscnt);
1471			if (hamachi_debug > 5)
1472				printk(KERN_DEBUG"%s:  rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
1473					   dev->name,
1474					   *(s32*)&(buf_addr[data_size - 20]),
1475					   *(s32*)&(buf_addr[data_size - 16]),
1476					   *(s32*)&(buf_addr[data_size - 12]),
1477					   *(s32*)&(buf_addr[data_size - 8]),
1478					   *(s32*)&(buf_addr[data_size - 4]));
1479#endif
1480			/* Check if the packet is long enough to accept without copying
1481			   to a minimally-sized skbuff. */
1482			if (pkt_len < rx_copybreak &&
1483			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1484#ifdef RX_CHECKSUM
1485				printk(KERN_ERR "%s: rx_copybreak non-zero "
1486				  "not good with RX_CHECKSUM\n", dev->name);
1487#endif
1488				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1489				pci_dma_sync_single_for_cpu(hmp->pci_dev,
1490							    leXX_to_cpu(hmp->rx_ring[entry].addr),
1491							    hmp->rx_buf_sz,
1492							    PCI_DMA_FROMDEVICE);
1493				/* Call copy + cksum if available. */
1494#if 1 || USE_IP_COPYSUM
1495				skb_copy_to_linear_data(skb,
1496					hmp->rx_skbuff[entry]->data, pkt_len);
1497				skb_put(skb, pkt_len);
1498#else
1499				memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
1500					+ entry*sizeof(*desc), pkt_len);
1501#endif
1502				pci_dma_sync_single_for_device(hmp->pci_dev,
1503							       leXX_to_cpu(hmp->rx_ring[entry].addr),
1504							       hmp->rx_buf_sz,
1505							       PCI_DMA_FROMDEVICE);
1506			} else {
1507				pci_unmap_single(hmp->pci_dev,
1508						 leXX_to_cpu(hmp->rx_ring[entry].addr),
1509						 hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1510				skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
1511				hmp->rx_skbuff[entry] = NULL;
1512			}
1513			skb->protocol = eth_type_trans(skb, dev);
1514
1515
1516#ifdef RX_CHECKSUM
1517			/* TCP or UDP on ipv4, DIX encoding */
1518			if (pfck>>24 == 0x91 || pfck>>24 == 0x51) {
1519				struct iphdr *ih = (struct iphdr *) skb->data;
1520				/* Check that IP packet is at least 46 bytes, otherwise,
1521				 * there may be pad bytes included in the hardware checksum.
1522				 * This wouldn't happen if everyone padded with 0.
1523				 */
1524				if (ntohs(ih->tot_len) >= 46){
1525					/* don't worry about frags */
1526					if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) {
1527						u32 inv = *(u32 *) &buf_addr[data_size - 16];
1528						u32 *p = (u32 *) &buf_addr[data_size - 20];
1529						register u32 crc, p_r, p_r1;
1530
1531						if (inv & 4) {
1532							inv &= ~4;
1533							--p;
1534						}
1535						p_r = *p;
1536						p_r1 = *(p-1);
1537						switch (inv) {
1538							case 0:
1539								crc = (p_r & 0xffff) + (p_r >> 16);
1540								break;
1541							case 1:
1542								crc = (p_r >> 16) + (p_r & 0xffff)
1543									+ (p_r1 >> 16 & 0xff00);
1544								break;
1545							case 2:
1546								crc = p_r + (p_r1 >> 16);
1547								break;
1548							case 3:
1549								crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
1550								break;
1551							default:	/*NOTREACHED*/ crc = 0;
1552						}
1553						if (crc & 0xffff0000) {
1554							crc &= 0xffff;
1555							++crc;
1556						}
1557						/* tcp/udp will add in pseudo */
1558						skb->csum = ntohs(pfck & 0xffff);
1559						if (skb->csum > crc)
1560							skb->csum -= crc;
1561						else
1562							skb->csum += (~crc & 0xffff);
1563						/*
1564						* could do the pseudo myself and return
1565						* CHECKSUM_UNNECESSARY
1566						*/
1567						skb->ip_summed = CHECKSUM_COMPLETE;
1568					}
1569				}
1570			}
1571#endif  /* RX_CHECKSUM */
1572
1573			netif_rx(skb);
1574			dev->stats.rx_packets++;
1575		}
1576		entry = (++hmp->cur_rx) % RX_RING_SIZE;
1577	}
1578
1579	/* Refill the Rx ring buffers. */
1580	for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
1581		struct hamachi_desc *desc;
1582
1583		entry = hmp->dirty_rx % RX_RING_SIZE;
1584		desc = &(hmp->rx_ring[entry]);
1585		if (hmp->rx_skbuff[entry] == NULL) {
1586			struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
1587
1588			hmp->rx_skbuff[entry] = skb;
1589			if (skb == NULL)
1590				break;		/* Better luck next round. */
1591			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1593				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1594		}
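		/* Hand the descriptor back to the chip: store the buffer size and
		 * set DescOwn; the last ring entry also gets DescEndRing so the
		 * DMA engine wraps back to the start of the ring. */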
1595		desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
1596		if (entry >= RX_RING_SIZE-1)
1597			desc->status_n_length |= cpu_to_le32(DescOwn |
1598				DescEndPacket | DescEndRing | DescIntr);
1599		else
1600			desc->status_n_length |= cpu_to_le32(DescOwn |
1601				DescEndPacket | DescIntr);
1602	}
1603
1604	/* Restart Rx engine if stopped. */
1605	/* If we don't need to check status, don't. -KDU */
1606	if (readw(hmp->base + RxStatus) & 0x0002)
1607		writew(0x0001, hmp->base + RxCmd);
1608
1609	return 0;
1610}
1611
1612/* This is more properly named "uncommon interrupt events", as it covers more
1613   than just errors. */
1614static void hamachi_error(struct net_device *dev, int intr_status)
1615{
1616	struct hamachi_private *hmp = netdev_priv(dev);
1617	void __iomem *ioaddr = hmp->base;
1618
1619	if (intr_status & (LinkChange|NegotiationChange)) {
1620		if (hamachi_debug > 1)
1621			printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
1622				   " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
1623				   dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2),
1624				   readw(ioaddr + ANLinkPartnerAbility),
1625				   readl(ioaddr + IntrStatus));
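		/* Update the link LED.  Bit 0x20 in ANStatus appears to indicate
		 * that autonegotiation completed and the link is up. */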
1626		if (readw(ioaddr + ANStatus) & 0x20)
1627			writeb(0x01, ioaddr + LEDCtrl);
1628		else
1629			writeb(0x03, ioaddr + LEDCtrl);
1630	}
1631	if (intr_status & StatsMax) {
1632		hamachi_get_stats(dev);
1633		/* Read the overflow bits to clear. */
1634		readl(ioaddr + 0x370);
1635		readl(ioaddr + 0x3F0);
1636	}
1637	if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) &&
1638	    hamachi_debug)
1639		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1640		       dev->name, intr_status);
1641	/* Hmmmmm, it's not clear how to recover from PCI faults. */
1642	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1643		dev->stats.tx_fifo_errors++;
1644	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1645		dev->stats.rx_fifo_errors++;
1646}
1647
1648static int hamachi_close(struct net_device *dev)
1649{
1650	struct hamachi_private *hmp = netdev_priv(dev);
1651	void __iomem *ioaddr = hmp->base;
1652	struct sk_buff *skb;
1653	int i;
1654
1655	netif_stop_queue(dev);
1656
1657	if (hamachi_debug > 1) {
1658		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
1659			   dev->name, readw(ioaddr + TxStatus),
1660			   readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus));
1661		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1662			   dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
1663	}
1664
1665	/* Disable interrupts by clearing the interrupt mask. */
1666	writel(0x0000, ioaddr + InterruptEnable);
1667
1668	/* Stop the chip's Tx and Rx processes. */
1669	writel(2, ioaddr + RxCmd);
1670	writew(2, ioaddr + TxCmd);
1671
1672#ifdef __i386__
1673	if (hamachi_debug > 2) {
1674		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1675			   (int)hmp->tx_ring_dma);
1676		for (i = 0; i < TX_RING_SIZE; i++)
1677			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
1678				   readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
1679				   i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
1680		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1681			   (int)hmp->rx_ring_dma);
1682		for (i = 0; i < RX_RING_SIZE; i++) {
1683			printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
1684				   readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
1685				   i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
1686			if (hamachi_debug > 6) {
1687				if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
1688					u16 *addr = (u16 *)
1689						hmp->rx_skbuff[i]->data;
1690					int j;
1691					printk(KERN_DEBUG "Addr: ");
1692					for (j = 0; j < 0x50; j++)
1693						printk(" %4.4x", addr[j]);
1694					printk("\n");
1695				}
1696			}
1697		}
1698	}
1699#endif /* __i386__ debugging only */
1700
1701	free_irq(hmp->pci_dev->irq, dev);
1702
1703	del_timer_sync(&hmp->timer);
1704
1705	/* Free all the skbuffs in the Rx queue. */
1706	for (i = 0; i < RX_RING_SIZE; i++) {
1707		skb = hmp->rx_skbuff[i];
1708		hmp->rx_ring[i].status_n_length = 0;
1709		if (skb) {
1710			pci_unmap_single(hmp->pci_dev,
1711				leXX_to_cpu(hmp->rx_ring[i].addr),
1712				hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1713			dev_kfree_skb(skb);
1714			hmp->rx_skbuff[i] = NULL;
1715		}
1716		hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */
1717	}
1718	for (i = 0; i < TX_RING_SIZE; i++) {
1719		skb = hmp->tx_skbuff[i];
1720		if (skb) {
1721			pci_unmap_single(hmp->pci_dev,
1722				leXX_to_cpu(hmp->tx_ring[i].addr),
1723				skb->len, PCI_DMA_TODEVICE);
1724			dev_kfree_skb(skb);
1725			hmp->tx_skbuff[i] = NULL;
1726		}
1727	}
1728
1729	writeb(0x00, ioaddr + LEDCtrl);
1730
1731	return 0;
1732}
1733
1734static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
1735{
1736	struct hamachi_private *hmp = netdev_priv(dev);
1737	void __iomem *ioaddr = hmp->base;
1738
1739	/* We should lock this segment of code for SMP eventually, although
1740	   the vulnerability window is very small and statistics are
1741	   non-critical. */
	/* The hardware Tx packet counter at offset 0x000 appears to stick at
	 * 21 packets (as reported by ifconfig), and tx_packets is already
	 * incremented in hamachi_tx(), so the register read below is left
	 * commented out.
	 */
1747	/* dev->stats.tx_packets	= readl(ioaddr + 0x000); */
1748
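	/* The remaining fields are read straight from the chip's hardware
	 * statistics counters. */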
1749	/* Total Uni+Brd+Multi */
1750	dev->stats.rx_bytes = readl(ioaddr + 0x330);
1751	/* Total Uni+Brd+Multi */
1752	dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
1753	/* Multicast Rx */
1754	dev->stats.multicast = readl(ioaddr + 0x320);
1755
1756	/* Over+Undersized */
1757	dev->stats.rx_length_errors = readl(ioaddr + 0x368);
1758	/* Jabber */
1759	dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
	/* CRC Errs */
	dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
1762	/* Symbol Errs */
1763	dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
1764	/* Dropped */
1765	dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
1766
1767	return &dev->stats;
1768}
1769
1770static void set_rx_mode(struct net_device *dev)
1771{
1772	struct hamachi_private *hmp = netdev_priv(dev);
1773	void __iomem *ioaddr = hmp->base;
1774
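	/* AddrMode values as programmed below: 0x000F promiscuous, 0x000B
	 * accept all multicast, 0x0003 filter multicast through the CAM,
	 * 0x0001 unicast/broadcast only. */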
1775	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1776		writew(0x000F, ioaddr + AddrMode);
1777	} else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
1778		/* Too many to match, or accept all multicasts. */
1779		writew(0x000B, ioaddr + AddrMode);
1780	} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
1781		struct netdev_hw_addr *ha;
1782		int i = 0;
1783
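		/* Each of the 64 CAM slots is an 8-byte pair starting at 0x100:
		 * the first word holds the low 32 bits of the MAC address, the
		 * second the remaining 16 bits plus what appears to be a valid
		 * bit (0x20000). */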
1784		netdev_for_each_mc_addr(ha, dev) {
1785			writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
1786			writel(0x20000 | (*(u16 *)&ha->addr[4]),
1787				   ioaddr + 0x104 + i*8);
1788			i++;
1789		}
1790		/* Clear remaining entries. */
1791		for (; i < 64; i++)
1792			writel(0, ioaddr + 0x104 + i*8);
1793		writew(0x0003, ioaddr + AddrMode);
1794	} else {					/* Normal, unicast/broadcast-only mode. */
1795		writew(0x0001, ioaddr + AddrMode);
1796	}
1797}
1798
1799static int check_if_running(struct net_device *dev)
1800{
1801	if (!netif_running(dev))
1802		return -EINVAL;
1803	return 0;
1804}
1805
1806static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1807{
1808	struct hamachi_private *np = netdev_priv(dev);
1809
1810	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1811	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1812	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1813}
1814
1815static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1816{
1817	struct hamachi_private *np = netdev_priv(dev);
1818	spin_lock_irq(&np->lock);
1819	mii_ethtool_gset(&np->mii_if, ecmd);
1820	spin_unlock_irq(&np->lock);
1821	return 0;
1822}
1823
1824static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1825{
1826	struct hamachi_private *np = netdev_priv(dev);
1827	int res;
1828	spin_lock_irq(&np->lock);
1829	res = mii_ethtool_sset(&np->mii_if, ecmd);
1830	spin_unlock_irq(&np->lock);
1831	return res;
1832}
1833
1834static int hamachi_nway_reset(struct net_device *dev)
1835{
1836	struct hamachi_private *np = netdev_priv(dev);
1837	return mii_nway_restart(&np->mii_if);
1838}
1839
1840static u32 hamachi_get_link(struct net_device *dev)
1841{
1842	struct hamachi_private *np = netdev_priv(dev);
1843	return mii_link_ok(&np->mii_if);
1844}
1845
1846static const struct ethtool_ops ethtool_ops = {
1847	.begin = check_if_running,
1848	.get_drvinfo = hamachi_get_drvinfo,
1849	.get_settings = hamachi_get_settings,
1850	.set_settings = hamachi_set_settings,
1851	.nway_reset = hamachi_nway_reset,
1852	.get_link = hamachi_get_link,
1853};
1854
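/* Presumably installed at probe time for boards without a usable MII
 * transceiver; only driver information is reported in that case. */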
1855static const struct ethtool_ops ethtool_ops_no_mii = {
1856	.begin = check_if_running,
1857	.get_drvinfo = hamachi_get_drvinfo,
1858};
1859
1860static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1861{
1862	struct hamachi_private *np = netdev_priv(dev);
1863	struct mii_ioctl_data *data = if_mii(rq);
1864	int rc;
1865
1866	if (!netif_running(dev))
1867		return -EINVAL;
1868
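	/* SIOCDEVPRIVATE+3 reloads the Tx/Rx interrupt mitigation registers at
	 * run time.  A minimal user-space sketch (hypothetical names; it assumes
	 * the two 32-bit control words sit at the start of ifr_ifru, which is
	 * what the code below expects):
	 *
	 *	struct ifreq ifr = { 0 };
	 *	unsigned int *d = (unsigned int *)&ifr.ifr_ifru;
	 *	strcpy(ifr.ifr_name, "eth0");
	 *	d[0] = new_tx_params;
	 *	d[1] = new_rx_params;
	 *	ioctl(sock_fd, SIOCDEVPRIVATE + 3, &ifr);
	 *
	 * where sock_fd is any open socket descriptor.
	 */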
1869	if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
1870		u32 *d = (u32 *)&rq->ifr_ifru;
1871		/* Should add this check here or an ordinary user can do nasty
1872		 * things. -KDU
1873		 *
1874		 * TODO: Shut down the Rx and Tx engines while doing this.
1875		 */
1876		if (!capable(CAP_NET_ADMIN))
1877			return -EPERM;
1878		writel(d[0], np->base + TxIntrCtrl);
1879		writel(d[1], np->base + RxIntrCtrl);
1880		printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
1881		  (u32) readl(np->base + TxIntrCtrl),
1882		  (u32) readl(np->base + RxIntrCtrl));
1883		rc = 0;
	} else {
1887		spin_lock_irq(&np->lock);
1888		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1889		spin_unlock_irq(&np->lock);
1890	}
1891
1892	return rc;
1893}
1894
1895
1896static void hamachi_remove_one(struct pci_dev *pdev)
1897{
1898	struct net_device *dev = pci_get_drvdata(pdev);
1899
1900	if (dev) {
1901		struct hamachi_private *hmp = netdev_priv(dev);
1902
1903		pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
1904			hmp->rx_ring_dma);
1905		pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
1906			hmp->tx_ring_dma);
1907		unregister_netdev(dev);
1908		iounmap(hmp->base);
1909		free_netdev(dev);
1910		pci_release_regions(pdev);
1911	}
1912}
1913
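/* Vendor 0x1318 is Packet Engines; device 0x0911 is the GNIC-II (Hamachi). */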
1914static const struct pci_device_id hamachi_pci_tbl[] = {
1915	{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
1916	{ 0, }
1917};
1918MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl);
1919
1920static struct pci_driver hamachi_driver = {
1921	.name		= DRV_NAME,
1922	.id_table	= hamachi_pci_tbl,
1923	.probe		= hamachi_init_one,
1924	.remove		= hamachi_remove_one,
1925};
1926
1927static int __init hamachi_init (void)
1928{
1929/* when a module, this is printed whether or not devices are found in probe */
1930#ifdef MODULE
1931	printk(version);
1932#endif
1933	return pci_register_driver(&hamachi_driver);
1934}
1935
1936static void __exit hamachi_exit (void)
1937{
1938	pci_unregister_driver(&hamachi_driver);
1939}
1940
1941
1942module_init(hamachi_init);
1943module_exit(hamachi_exit);
1944