1/*
2 *	This program is free software; you can redistribute it and/or
3 *	modify it under the terms of the GNU General Public License
4 *	as published by the Free Software Foundation; either version
5 *	2 of the License, or (at your option) any later version.
6 *
7 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8 *	(c) Copyright 2000, 2001 Red Hat Inc
9 *
10 *	Development of this driver was funded by Equiinet Ltd
11 *			http://www.equiinet.com
12 *
13 *	ChangeLog:
14 *
15 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16 *	unification of all the Z85x30 asynchronous drivers for real.
17 *
18 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
19 *	boundary.
20 *
21 *	Modified for SMP safety and SMP locking by Alan Cox
22 *					<alan@lxorguk.ukuu.org.uk>
23 *
24 *	Performance
25 *
26 *	Z85230:
27 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
28 *	X.25 is not unrealistic on all machines. DMA mode can in theory
29 *	handle T1/E1 quite nicely. In practice the limit seems to be about
30 *	512Kbit->1Mbit depending on motherboard.
31 *
32 *	Z85C30:
33 *	64K will take DMA, 9600 baud X.25 should be ok.
34 *
35 *	Z8530:
36 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
37 */
38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/mm.h>
44#include <linux/net.h>
45#include <linux/skbuff.h>
46#include <linux/netdevice.h>
47#include <linux/if_arp.h>
48#include <linux/delay.h>
49#include <linux/hdlc.h>
50#include <linux/ioport.h>
51#include <linux/init.h>
52#include <linux/gfp.h>
53#include <asm/dma.h>
54#include <asm/io.h>
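
/* RT_LOCK/RT_UNLOCK are no-op placeholders; see the note above z8530_rx() */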
55#define RT_LOCK
56#define RT_UNLOCK
57#include <linux/spinlock.h>
58
59#include "z85230.h"
60
61
62/**
63 *	z8530_read_port - Architecture specific interface function
64 *	@p: port to read
65 *
 *	Provided port access methods. The Comtrol SV11 requires no delays
 *	between accesses and uses PC I/O. Some drivers may need a 5uS delay.
68 *
69 *	In the longer term this should become an architecture specific
70 *	section so that this can become a generic driver interface for all
71 *	platforms. For now we only handle PC I/O ports with or without the
72 *	dread 5uS sanity delay.
73 *
74 *	The caller must hold sufficient locks to avoid violating the horrible
75 *	5uS delay rule.
76 */
77
78static inline int z8530_read_port(unsigned long p)
79{
80	u8 r=inb(Z8530_PORT_OF(p));
81	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
82		udelay(5);
83	return r;
84}
85
86/**
87 *	z8530_write_port - Architecture specific interface function
88 *	@p: port to write
89 *	@d: value to write
90 *
91 *	Write a value to a port with delays if need be. Note that the
92 *	caller must hold locks to avoid read/writes from other contexts
93 *	violating the 5uS rule
94 *
95 *	In the longer term this should become an architecture specific
96 *	section so that this can become a generic driver interface for all
97 *	platforms. For now we only handle PC I/O ports with or without the
98 *	dread 5uS sanity delay.
99 */
100
101
102static inline void z8530_write_port(unsigned long p, u8 d)
103{
104	outb(d,Z8530_PORT_OF(p));
105	if(p&Z8530_PORT_SLEEP)
106		udelay(5);
107}
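
/*
 *	Illustrative sketch (not part of this driver): a board driver that
 *	needs the 5uS delay encodes it into the port values it hands to this
 *	core. The iobase and register layout below are hypothetical:
 *
 *		dev->chanA.ctrlio = (iobase + 1) | Z8530_PORT_SLEEP;
 *		dev->chanA.dataio = (iobase + 3) | Z8530_PORT_SLEEP;
 *
 *	Boards that tolerate back to back accesses simply omit the
 *	Z8530_PORT_SLEEP bit and take the faster path.
 */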
108
109
110
111static void z8530_rx_done(struct z8530_channel *c);
112static void z8530_tx_done(struct z8530_channel *c);
113
114
115/**
116 *	read_zsreg - Read a register from a Z85230
117 *	@c: Z8530 channel to read from (2 per chip)
118 *	@reg: Register to read
119 *	FIXME: Use a spinlock.
120 *
121 *	Most of the Z8530 registers are indexed off the control registers.
122 *	A read is done by writing to the control register and reading the
123 *	register back.  The caller must hold the lock
124 */
125
126static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
127{
128	if(reg)
129		z8530_write_port(c->ctrlio, reg);
130	return z8530_read_port(c->ctrlio);
131}
132
133/**
134 *	read_zsdata - Read the data port of a Z8530 channel
135 *	@c: The Z8530 channel to read the data port from
136 *
137 *	The data port provides fast access to some things. We still
138 *	have all the 5uS delays to worry about.
139 */
140
141static inline u8 read_zsdata(struct z8530_channel *c)
142{
143	u8 r;
144	r=z8530_read_port(c->dataio);
145	return r;
146}
147
148/**
149 *	write_zsreg - Write to a Z8530 channel register
150 *	@c: The Z8530 channel
151 *	@reg: Register number
152 *	@val: Value to write
153 *
154 *	Write a value to an indexed register. The caller must hold the lock
155 *	to honour the irritating delay rules. We know about register 0
156 *	being fast to access.
157 *
158 *      Assumes c->lock is held.
159 */
160static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
161{
162	if(reg)
163		z8530_write_port(c->ctrlio, reg);
164	z8530_write_port(c->ctrlio, val);
165
166}
167
168/**
169 *	write_zsctrl - Write to a Z8530 control register
170 *	@c: The Z8530 channel
171 *	@val: Value to write
172 *
173 *	Write directly to the control register on the Z8530
174 */
175
176static inline void write_zsctrl(struct z8530_channel *c, u8 val)
177{
178	z8530_write_port(c->ctrlio, val);
179}
180
181/**
 *	write_zsdata - Write to a Z8530 data register
183 *	@c: The Z8530 channel
184 *	@val: Value to write
185 *
186 *	Write directly to the data register on the Z8530
187 */
188
189
190static inline void write_zsdata(struct z8530_channel *c, u8 val)
191{
192	z8530_write_port(c->dataio, val);
193}
194
195/*
196 *	Register loading parameters for a dead port
197 */
198
199u8 z8530_dead_port[]=
200{
201	255
202};
203
204EXPORT_SYMBOL(z8530_dead_port);
205
206/*
207 *	Register loading parameters for currently supported circuit types
208 */
209
210
211/*
212 *	Data clocked by telco end. This is the correct data for the UK
213 *	"kilostream" service, and most other similar services.
214 */
215
216u8 z8530_hdlc_kilostream[]=
217{
218	4,	SYNC_ENAB|SDLC|X1CLK,
219	2,	0,	/* No vector */
220	1,	0,
221	3,	ENT_HM|RxCRC_ENAB|Rx8,
222	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
223	9,	0,		/* Disable interrupts */
224	6,	0xFF,
225	7,	FLAG,
226	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
227	11,	TCTRxCP,
228	14,	DISDPLL,
229	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
230	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
231	9,	NV|MIE|NORESET,
232	255
233};
234
235EXPORT_SYMBOL(z8530_hdlc_kilostream);
236
237/*
238 *	As above but for enhanced chips.
239 */
240
241u8 z8530_hdlc_kilostream_85230[]=
242{
243	4,	SYNC_ENAB|SDLC|X1CLK,
244	2,	0,	/* No vector */
245	1,	0,
246	3,	ENT_HM|RxCRC_ENAB|Rx8,
247	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
248	9,	0,		/* Disable interrupts */
249	6,	0xFF,
250	7,	FLAG,
251	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
252	11,	TCTRxCP,
253	14,	DISDPLL,
254	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
255	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
256	9,	NV|MIE|NORESET,
257	23,	3,		/* Extended mode AUTO TX and EOM*/
258
259	255
260};
261
262EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
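
/*
 *	These tables are consumed by z8530_channel_load() as flat
 *	(register, value) pairs terminated by 255; register numbers above
 *	0x0F select the shadow "prime" registers on the enhanced parts.
 *	A board with unusual clocking could supply its own table in the same
 *	format. The values below are purely illustrative, not a tested
 *	configuration:
 *
 *		static u8 my_board_regs[] = {
 *			4,	SYNC_ENAB | SDLC | X1CLK,
 *			3,	ENT_HM | RxCRC_ENAB | Rx8,
 *			5,	TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
 *			255
 *		};
 */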
263
264/**
265 *	z8530_flush_fifo - Flush on chip RX FIFO
266 *	@c: Channel to flush
267 *
268 *	Flush the receive FIFO. There is no specific option for this, we
269 *	blindly read bytes and discard them. Reading when there is no data
270 *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
271 *
272 *	All locking is handled for the caller. On return data may still be
273 *	present if it arrived during the flush.
274 */
275
276static void z8530_flush_fifo(struct z8530_channel *c)
277{
278	read_zsreg(c, R1);
279	read_zsreg(c, R1);
280	read_zsreg(c, R1);
281	read_zsreg(c, R1);
282	if(c->dev->type==Z85230)
283	{
284		read_zsreg(c, R1);
285		read_zsreg(c, R1);
286		read_zsreg(c, R1);
287		read_zsreg(c, R1);
288	}
289}
290
291/**
 *	z8530_rtsdtr - Control the outgoing DTR/RTS lines
 *	@c: The Z8530 channel to control
294 *	@set: 1 to set, 0 to clear
295 *
296 *	Sets or clears DTR/RTS on the requested line. All locking is handled
297 *	by the caller. For now we assume all boards use the actual RTS/DTR
298 *	on the chip. Apparently one or two don't. We'll scream about them
299 *	later.
300 */
301
302static void z8530_rtsdtr(struct z8530_channel *c, int set)
303{
304	if (set)
305		c->regs[5] |= (RTS | DTR);
306	else
307		c->regs[5] &= ~(RTS | DTR);
308	write_zsreg(c, R5, c->regs[5]);
309}
310
311/**
312 *	z8530_rx - Handle a PIO receive event
313 *	@c: Z8530 channel to process
314 *
315 *	Receive handler for receiving in PIO mode. This is much like the
316 *	async one but not quite the same or as complex
317 *
 *	Note: It's intended that this handler can easily be separated from
319 *	the main code to run realtime. That'll be needed for some machines
320 *	(eg to ever clock 64kbits on a sparc ;)).
321 *
322 *	The RT_LOCK macros don't do anything now. Keep the code covered
323 *	by them as short as possible in all circumstances - clocks cost
324 *	baud. The interrupt handler is assumed to be atomic w.r.t. to
325 *	other code - this is true in the RT case too.
326 *
327 *	We only cover the sync cases for this. If you want 2Mbit async
328 *	do it yourself but consider medical assistance first. This non DMA
329 *	synchronous mode is portable code. The DMA mode assumes PCI like
330 *	ISA DMA
331 *
332 *	Called with the device lock held
333 */
334
335static void z8530_rx(struct z8530_channel *c)
336{
337	u8 ch,stat;
338
339	while(1)
340	{
341		/* FIFO empty ? */
342		if(!(read_zsreg(c, R0)&1))
343			break;
344		ch=read_zsdata(c);
345		stat=read_zsreg(c, R1);
346
347		/*
348		 *	Overrun ?
349		 */
350		if(c->count < c->max)
351		{
352			*c->dptr++=ch;
353			c->count++;
354		}
355
356		if(stat&END_FR)
357		{
358
359			/*
360			 *	Error ?
361			 */
362			if(stat&(Rx_OVR|CRC_ERR))
363			{
364				/* Rewind the buffer and return */
365				if(c->skb)
366					c->dptr=c->skb->data;
367				c->count=0;
368				if(stat&Rx_OVR)
369				{
370					pr_warn("%s: overrun\n", c->dev->name);
371					c->rx_overrun++;
372				}
373				if(stat&CRC_ERR)
374				{
375					c->rx_crc_err++;
376					/* printk("crc error\n"); */
377				}
378				/* Shove the frame upstream */
379			}
380			else
381			{
382				/*
383				 *	Drop the lock for RX processing, or
384		 		 *	there are deadlocks
385		 		 */
386				z8530_rx_done(c);
387				write_zsctrl(c, RES_Rx_CRC);
388			}
389		}
390	}
391	/*
392	 *	Clear irq
393	 */
394	write_zsctrl(c, ERR_RES);
395	write_zsctrl(c, RES_H_IUS);
396}
397
398
399/**
400 *	z8530_tx - Handle a PIO transmit event
401 *	@c: Z8530 channel to process
402 *
403 *	Z8530 transmit interrupt handler for the PIO mode. The basic
 *	idea is to attempt to keep the FIFO fed. We fill in as many bytes
 *	as possible; otherwise it's quite possible that we won't keep up
 *	with the data rate.
407 */
408
409static void z8530_tx(struct z8530_channel *c)
410{
411	while(c->txcount) {
412		/* FIFO full ? */
413		if(!(read_zsreg(c, R0)&4))
414			return;
415		c->txcount--;
416		/*
417		 *	Shovel out the byte
418		 */
419		write_zsreg(c, R8, *c->tx_ptr++);
420		write_zsctrl(c, RES_H_IUS);
421		/* We are about to underflow */
422		if(c->txcount==0)
423		{
424			write_zsctrl(c, RES_EOM_L);
425			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
426		}
427	}
428
429
430	/*
431	 *	End of frame TX - fire another one
432	 */
433
434	write_zsctrl(c, RES_Tx_P);
435
436	z8530_tx_done(c);
437	write_zsctrl(c, RES_H_IUS);
438}
439
440/**
441 *	z8530_status - Handle a PIO status exception
442 *	@chan: Z8530 channel to process
443 *
444 *	A status event occurred in PIO synchronous mode. There are several
445 *	reasons the chip will bother us here. A transmit underrun means we
446 *	failed to feed the chip fast enough and just broke a packet. A DCD
447 *	change is a line up or down.
448 */
449
450static void z8530_status(struct z8530_channel *chan)
451{
452	u8 status, altered;
453
454	status = read_zsreg(chan, R0);
455	altered = chan->status ^ status;
456
457	chan->status = status;
458
459	if (status & TxEOM) {
460/*		printk("%s: Tx underrun.\n", chan->dev->name); */
461		chan->netdevice->stats.tx_fifo_errors++;
462		write_zsctrl(chan, ERR_RES);
463		z8530_tx_done(chan);
464	}
465
466	if (altered & chan->dcdcheck)
467	{
468		if (status & chan->dcdcheck) {
469			pr_info("%s: DCD raised\n", chan->dev->name);
470			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
471			if (chan->netdevice)
472				netif_carrier_on(chan->netdevice);
473		} else {
474			pr_info("%s: DCD lost\n", chan->dev->name);
475			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
476			z8530_flush_fifo(chan);
477			if (chan->netdevice)
478				netif_carrier_off(chan->netdevice);
479		}
480
481	}
482	write_zsctrl(chan, RES_EXT_INT);
483	write_zsctrl(chan, RES_H_IUS);
484}
485
struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};
492
493EXPORT_SYMBOL(z8530_sync);
494
495/**
496 *	z8530_dma_rx - Handle a DMA RX event
497 *	@chan: Channel to handle
498 *
499 *	Non bus mastering DMA interfaces for the Z8x30 devices. This
500 *	is really pretty PC specific. The DMA mode means that most receive
501 *	events are handled by the DMA hardware. We get a kick here only if
502 *	a frame ended.
503 */
504
505static void z8530_dma_rx(struct z8530_channel *chan)
506{
507	if(chan->rxdma_on)
508	{
509		/* Special condition check only */
510		u8 status;
511
512		read_zsreg(chan, R7);
513		read_zsreg(chan, R6);
514
515		status=read_zsreg(chan, R1);
516
517		if(status&END_FR)
518		{
519			z8530_rx_done(chan);	/* Fire up the next one */
520		}
521		write_zsctrl(chan, ERR_RES);
522		write_zsctrl(chan, RES_H_IUS);
523	}
524	else
525	{
526		/* DMA is off right now, drain the slow way */
527		z8530_rx(chan);
528	}
529}
530
531/**
532 *	z8530_dma_tx - Handle a DMA TX event
533 *	@chan:	The Z8530 channel to handle
534 *
535 *	We have received an interrupt while doing DMA transmissions. It
536 *	shouldn't happen. Scream loudly if it does.
537 */
538
539static void z8530_dma_tx(struct z8530_channel *chan)
540{
541	if(!chan->dma_tx)
542	{
543		pr_warn("Hey who turned the DMA off?\n");
544		z8530_tx(chan);
545		return;
546	}
547	/* This shouldn't occur in DMA mode */
548	pr_err("DMA tx - bogus event!\n");
549	z8530_tx(chan);
550}
551
552/**
553 *	z8530_dma_status - Handle a DMA status exception
554 *	@chan: Z8530 channel to process
555 *
556 *	A status event occurred on the Z8530. We receive these for two reasons
557 *	when in DMA mode. Firstly if we finished a packet transfer we get one
558 *	and kick the next packet out. Secondly we may see a DCD change.
559 *
560 */
561
562static void z8530_dma_status(struct z8530_channel *chan)
563{
564	u8 status, altered;
565
566	status=read_zsreg(chan, R0);
567	altered=chan->status^status;
568
569	chan->status=status;
570
571
572	if(chan->dma_tx)
573	{
574		if(status&TxEOM)
575		{
576			unsigned long flags;
577
578			flags=claim_dma_lock();
579			disable_dma(chan->txdma);
580			clear_dma_ff(chan->txdma);
581			chan->txdma_on=0;
582			release_dma_lock(flags);
583			z8530_tx_done(chan);
584		}
585	}
586
587	if (altered & chan->dcdcheck)
588	{
589		if (status & chan->dcdcheck) {
590			pr_info("%s: DCD raised\n", chan->dev->name);
591			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
592			if (chan->netdevice)
593				netif_carrier_on(chan->netdevice);
594		} else {
595			pr_info("%s: DCD lost\n", chan->dev->name);
596			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
597			z8530_flush_fifo(chan);
598			if (chan->netdevice)
599				netif_carrier_off(chan->netdevice);
600		}
601	}
602
603	write_zsctrl(chan, RES_EXT_INT);
604	write_zsctrl(chan, RES_H_IUS);
605}
606
static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
618
619/**
620 *	z8530_rx_clear - Handle RX events from a stopped chip
621 *	@c: Z8530 channel to shut up
622 *
623 *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
624 *	For machines with PCI Z85x30 cards, or level triggered interrupts
625 *	(eg the MacII) we must clear the interrupt cause or die.
626 */
627
628
629static void z8530_rx_clear(struct z8530_channel *c)
630{
631	/*
632	 *	Data and status bytes
633	 */
634	u8 stat;
635
636	read_zsdata(c);
637	stat=read_zsreg(c, R1);
638
639	if(stat&END_FR)
640		write_zsctrl(c, RES_Rx_CRC);
641	/*
642	 *	Clear irq
643	 */
644	write_zsctrl(c, ERR_RES);
645	write_zsctrl(c, RES_H_IUS);
646}
647
648/**
649 *	z8530_tx_clear - Handle TX events from a stopped chip
650 *	@c: Z8530 channel to shut up
651 *
652 *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
653 *	For machines with PCI Z85x30 cards, or level triggered interrupts
654 *	(eg the MacII) we must clear the interrupt cause or die.
655 */
656
657static void z8530_tx_clear(struct z8530_channel *c)
658{
659	write_zsctrl(c, RES_Tx_P);
660	write_zsctrl(c, RES_H_IUS);
661}
662
663/**
664 *	z8530_status_clear - Handle status events from a stopped chip
665 *	@chan: Z8530 channel to shut up
666 *
667 *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
668 *	For machines with PCI Z85x30 cards, or level triggered interrupts
669 *	(eg the MacII) we must clear the interrupt cause or die.
670 */
671
672static void z8530_status_clear(struct z8530_channel *chan)
673{
674	u8 status=read_zsreg(chan, R0);
675	if(status&TxEOM)
676		write_zsctrl(chan, ERR_RES);
677	write_zsctrl(chan, RES_EXT_INT);
678	write_zsctrl(chan, RES_H_IUS);
679}
680
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};
687
688
689EXPORT_SYMBOL(z8530_nop);
690
691/**
692 *	z8530_interrupt - Handle an interrupt from a Z8530
693 *	@irq: 	Interrupt number
694 *	@dev_id: The Z8530 device that is interrupting.
695 *
696 *	A Z85[2]30 device has stuck its hand in the air for attention.
697 *	We scan both the channels on the chip for events and then call
698 *	the channel specific call backs for each channel that has events.
699 *	We have to use callback functions because the two channels can be
700 *	in different modes.
701 *
702 *	Locking is done for the handlers. Note that locking is done
703 *	at the chip level (the 5uS delay issue is per chip not per
704 *	channel). c->lock for both channels points to dev->lock
705 */
706
707irqreturn_t z8530_interrupt(int irq, void *dev_id)
708{
709	struct z8530_dev *dev=dev_id;
710	u8 uninitialized_var(intr);
711	static volatile int locker=0;
712	int work=0;
713	struct z8530_irqhandler *irqs;
714
715	if(locker)
716	{
717		pr_err("IRQ re-enter\n");
718		return IRQ_NONE;
719	}
720	locker=1;
721
722	spin_lock(&dev->lock);
723
724	while(++work<5000)
725	{
726
727		intr = read_zsreg(&dev->chanA, R3);
728		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
729			break;
730
731		/* This holds the IRQ status. On the 8530 you must read it from chan
732		   A even though it applies to the whole chip */
733
734		/* Now walk the chip and see what it is wanting - it may be
735		   an IRQ for someone else remember */
736
737		irqs=dev->chanA.irqs;
738
739		if(intr & (CHARxIP|CHATxIP|CHAEXT))
740		{
741			if(intr&CHARxIP)
742				irqs->rx(&dev->chanA);
743			if(intr&CHATxIP)
744				irqs->tx(&dev->chanA);
745			if(intr&CHAEXT)
746				irqs->status(&dev->chanA);
747		}
748
749		irqs=dev->chanB.irqs;
750
751		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
752		{
753			if(intr&CHBRxIP)
754				irqs->rx(&dev->chanB);
755			if(intr&CHBTxIP)
756				irqs->tx(&dev->chanB);
757			if(intr&CHBEXT)
758				irqs->status(&dev->chanB);
759		}
760	}
761	spin_unlock(&dev->lock);
762	if(work==5000)
763		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
764		       dev->name, intr);
765	/* Ok all done */
766	locker=0;
767	return IRQ_HANDLED;
768}
769
770EXPORT_SYMBOL(z8530_interrupt);
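
/*
 *	Illustrative sketch: a board driver hands this handler straight to
 *	request_irq() with its struct z8530_dev as the cookie, so both
 *	channels get serviced from the one interrupt. The sv->irq and
 *	sv->sync names below are hypothetical:
 *
 *		if (request_irq(sv->irq, z8530_interrupt, IRQF_SHARED,
 *				"z85230", &sv->sync)) {
 *			pr_err("IRQ %d already in use\n", sv->irq);
 *			goto fail;
 *		}
 *
 *	The 5000 iteration limit in the handler is simply a guard against a
 *	wedged chip holding the interrupt line down forever.
 */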
771
772static const u8 reg_init[16]=
773{
774	0,0,0,0,
775	0,0,0,0,
776	0,0,0,0,
777	0x55,0,0,0
778};
779
780
781/**
782 *	z8530_sync_open - Open a Z8530 channel for PIO
783 *	@dev:	The network interface we are using
784 *	@c:	The Z8530 channel to open in synchronous PIO mode
785 *
786 *	Switch a Z8530 into synchronous mode without DMA assist. We
787 *	raise the RTS/DTR and commence network operation.
788 */
789
790int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
791{
792	unsigned long flags;
793
794	spin_lock_irqsave(c->lock, flags);
795
796	c->sync = 1;
797	c->mtu = dev->mtu+64;
798	c->count = 0;
799	c->skb = NULL;
800	c->skb2 = NULL;
801	c->irqs = &z8530_sync;
802
803	/* This loads the double buffer up */
804	z8530_rx_done(c);	/* Load the frame ring */
805	z8530_rx_done(c);	/* Load the backup frame */
806	z8530_rtsdtr(c,1);
807	c->dma_tx = 0;
808	c->regs[R1]|=TxINT_ENAB;
809	write_zsreg(c, R1, c->regs[R1]);
810	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
811
812	spin_unlock_irqrestore(c->lock, flags);
813	return 0;
814}
815
816
817EXPORT_SYMBOL(z8530_sync_open);
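
/*
 *	Illustrative sketch (not part of this driver) of how a card driver
 *	might use this from its ndo_open handler. The slvl_to_chan() helper
 *	and the error handling shown are hypothetical:
 *
 *		static int my_open(struct net_device *d)
 *		{
 *			struct z8530_channel *ch = slvl_to_chan(d);
 *			int err = z8530_sync_open(d, ch);
 *
 *			if (err)
 *				return err;
 *			err = z8530_channel_load(ch, z8530_hdlc_kilostream);
 *			if (err) {
 *				z8530_sync_close(d, ch);
 *				return err;
 *			}
 *			netif_start_queue(d);
 *			return 0;
 *		}
 */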
818
819/**
820 *	z8530_sync_close - Close a PIO Z8530 channel
821 *	@dev: Network device to close
822 *	@c: Z8530 channel to disassociate and move to idle
823 *
824 *	Close down a Z8530 interface and switch its interrupt handlers
825 *	to discard future events.
826 */
827
828int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
829{
830	u8 chk;
831	unsigned long flags;
832
833	spin_lock_irqsave(c->lock, flags);
834	c->irqs = &z8530_nop;
835	c->max = 0;
836	c->sync = 0;
837
838	chk=read_zsreg(c,R0);
839	write_zsreg(c, R3, c->regs[R3]);
840	z8530_rtsdtr(c,0);
841
842	spin_unlock_irqrestore(c->lock, flags);
843	return 0;
844}
845
846EXPORT_SYMBOL(z8530_sync_close);
847
848/**
849 *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
850 *	@dev: The network device to attach
851 *	@c: The Z8530 channel to configure in sync DMA mode.
852 *
853 *	Set up a Z85x30 device for synchronous DMA in both directions. Two
854 *	ISA DMA channels must be available for this to work. We assume ISA
855 *	DMA driven I/O and PC limits on access.
856 */
857
858int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
859{
860	unsigned long cflags, dflags;
861
862	c->sync = 1;
863	c->mtu = dev->mtu+64;
864	c->count = 0;
865	c->skb = NULL;
866	c->skb2 = NULL;
867	/*
868	 *	Load the DMA interfaces up
869	 */
870	c->rxdma_on = 0;
871	c->txdma_on = 0;
872
873	/*
874	 *	Allocate the DMA flip buffers. Limit by page size.
875	 *	Everyone runs 1500 mtu or less on wan links so this
876	 *	should be fine.
877	 */
878
879	if(c->mtu  > PAGE_SIZE/2)
880		return -EMSGSIZE;
881
882	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
883	if(c->rx_buf[0]==NULL)
884		return -ENOBUFS;
885	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
886
887	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
888	if(c->tx_dma_buf[0]==NULL)
889	{
890		free_page((unsigned long)c->rx_buf[0]);
891		c->rx_buf[0]=NULL;
892		return -ENOBUFS;
893	}
894	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
895
896	c->tx_dma_used=0;
897	c->dma_tx = 1;
898	c->dma_num=0;
899	c->dma_ready=1;
900
901	/*
902	 *	Enable DMA control mode
903	 */
904
905	spin_lock_irqsave(c->lock, cflags);
906
907	/*
908	 *	TX DMA via DIR/REQ
909	 */
910
911	c->regs[R14]|= DTRREQ;
912	write_zsreg(c, R14, c->regs[R14]);
913
914	c->regs[R1]&= ~TxINT_ENAB;
915	write_zsreg(c, R1, c->regs[R1]);
916
917	/*
918	 *	RX DMA via W/Req
919	 */
920
921	c->regs[R1]|= WT_FN_RDYFN;
922	c->regs[R1]|= WT_RDY_RT;
923	c->regs[R1]|= INT_ERR_Rx;
924	c->regs[R1]&= ~TxINT_ENAB;
925	write_zsreg(c, R1, c->regs[R1]);
926	c->regs[R1]|= WT_RDY_ENAB;
927	write_zsreg(c, R1, c->regs[R1]);
928
929	/*
930	 *	DMA interrupts
931	 */
932
933	/*
934	 *	Set up the DMA configuration
935	 */
936
937	dflags=claim_dma_lock();
938
939	disable_dma(c->rxdma);
940	clear_dma_ff(c->rxdma);
941	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
942	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
943	set_dma_count(c->rxdma, c->mtu);
944	enable_dma(c->rxdma);
945
946	disable_dma(c->txdma);
947	clear_dma_ff(c->txdma);
948	set_dma_mode(c->txdma, DMA_MODE_WRITE);
949	disable_dma(c->txdma);
950
951	release_dma_lock(dflags);
952
953	/*
954	 *	Select the DMA interrupt handlers
955	 */
956
957	c->rxdma_on = 1;
958	c->txdma_on = 1;
959	c->tx_dma_used = 1;
960
961	c->irqs = &z8530_dma_sync;
962	z8530_rtsdtr(c,1);
963	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
964
965	spin_unlock_irqrestore(c->lock, cflags);
966
967	return 0;
968}
969
970EXPORT_SYMBOL(z8530_sync_dma_open);
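
/*
 *	Illustrative sketch: before calling z8530_sync_dma_open() the board
 *	driver is expected to have claimed its two ISA DMA channels and set
 *	c->rxdma / c->txdma, roughly as below. The channel numbers normally
 *	come from the card configuration and the labels are hypothetical:
 *
 *		if (request_dma(chan->rxdma, "z85230 rx"))
 *			goto dma_fail;
 *		if (request_dma(chan->txdma, "z85230 tx")) {
 *			free_dma(chan->rxdma);
 *			goto dma_fail;
 *		}
 *		err = z8530_sync_dma_open(dev, chan);
 */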
971
972/**
973 *	z8530_sync_dma_close - Close down DMA I/O
974 *	@dev: Network device to detach
975 *	@c: Z8530 channel to move into discard mode
976 *
977 *	Shut down a DMA mode synchronous interface. Halt the DMA, and
978 *	free the buffers.
979 */
980
981int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
982{
983	u8 chk;
984	unsigned long flags;
985
986	c->irqs = &z8530_nop;
987	c->max = 0;
988	c->sync = 0;
989
990	/*
991	 *	Disable the PC DMA channels
992	 */
993
994	flags=claim_dma_lock();
995	disable_dma(c->rxdma);
996	clear_dma_ff(c->rxdma);
997
998	c->rxdma_on = 0;
999
1000	disable_dma(c->txdma);
1001	clear_dma_ff(c->txdma);
1002	release_dma_lock(flags);
1003
1004	c->txdma_on = 0;
1005	c->tx_dma_used = 0;
1006
1007	spin_lock_irqsave(c->lock, flags);
1008
1009	/*
1010	 *	Disable DMA control mode
1011	 */
1012
1013	c->regs[R1]&= ~WT_RDY_ENAB;
1014	write_zsreg(c, R1, c->regs[R1]);
1015	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1016	c->regs[R1]|= INT_ALL_Rx;
1017	write_zsreg(c, R1, c->regs[R1]);
1018	c->regs[R14]&= ~DTRREQ;
1019	write_zsreg(c, R14, c->regs[R14]);
1020
1021	if(c->rx_buf[0])
1022	{
1023		free_page((unsigned long)c->rx_buf[0]);
1024		c->rx_buf[0]=NULL;
1025	}
1026	if(c->tx_dma_buf[0])
1027	{
1028		free_page((unsigned  long)c->tx_dma_buf[0]);
1029		c->tx_dma_buf[0]=NULL;
1030	}
1031	chk=read_zsreg(c,R0);
1032	write_zsreg(c, R3, c->regs[R3]);
1033	z8530_rtsdtr(c,0);
1034
1035	spin_unlock_irqrestore(c->lock, flags);
1036
1037	return 0;
1038}
1039
1040EXPORT_SYMBOL(z8530_sync_dma_close);
1041
1042/**
1043 *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1044 *	@dev: The network device to attach
1045 *	@c: The Z8530 channel to configure in sync DMA mode.
1046 *
 *	Set up a Z85x30 device for synchronous DMA transmission. One
1048 *	ISA DMA channel must be available for this to work. The receive
1049 *	side is run in PIO mode, but then it has the bigger FIFO.
1050 */
1051
1052int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1053{
1054	unsigned long cflags, dflags;
1055
	pr_info("Opening sync interface for TX-DMA\n");
1057	c->sync = 1;
1058	c->mtu = dev->mtu+64;
1059	c->count = 0;
1060	c->skb = NULL;
1061	c->skb2 = NULL;
1062
1063	/*
1064	 *	Allocate the DMA flip buffers. Limit by page size.
1065	 *	Everyone runs 1500 mtu or less on wan links so this
1066	 *	should be fine.
1067	 */
1068
1069	if(c->mtu  > PAGE_SIZE/2)
1070		return -EMSGSIZE;
1071
1072	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1073	if(c->tx_dma_buf[0]==NULL)
1074		return -ENOBUFS;
1075
1076	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1077
1078
1079	spin_lock_irqsave(c->lock, cflags);
1080
1081	/*
1082	 *	Load the PIO receive ring
1083	 */
1084
1085	z8530_rx_done(c);
1086	z8530_rx_done(c);
1087
1088 	/*
1089	 *	Load the DMA interfaces up
1090	 */
1091
1092	c->rxdma_on = 0;
1093	c->txdma_on = 0;
1094
1095	c->tx_dma_used=0;
1096	c->dma_num=0;
1097	c->dma_ready=1;
1098	c->dma_tx = 1;
1099
1100 	/*
1101	 *	Enable DMA control mode
1102	 */
1103
1104 	/*
1105	 *	TX DMA via DIR/REQ
1106 	 */
1107	c->regs[R14]|= DTRREQ;
1108	write_zsreg(c, R14, c->regs[R14]);
1109
1110	c->regs[R1]&= ~TxINT_ENAB;
1111	write_zsreg(c, R1, c->regs[R1]);
1112
1113	/*
1114	 *	Set up the DMA configuration
1115	 */
1116
1117	dflags = claim_dma_lock();
1118
1119	disable_dma(c->txdma);
1120	clear_dma_ff(c->txdma);
1121	set_dma_mode(c->txdma, DMA_MODE_WRITE);
1122	disable_dma(c->txdma);
1123
1124	release_dma_lock(dflags);
1125
1126	/*
1127	 *	Select the DMA interrupt handlers
1128	 */
1129
1130	c->rxdma_on = 0;
1131	c->txdma_on = 1;
1132	c->tx_dma_used = 1;
1133
1134	c->irqs = &z8530_txdma_sync;
1135	z8530_rtsdtr(c,1);
1136	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1137	spin_unlock_irqrestore(c->lock, cflags);
1138
1139	return 0;
1140}
1141
1142EXPORT_SYMBOL(z8530_sync_txdma_open);
1143
1144/**
1145 *	z8530_sync_txdma_close - Close down a TX driven DMA channel
1146 *	@dev: Network device to detach
1147 *	@c: Z8530 channel to move into discard mode
1148 *
1149 *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1150 *	and  free the buffers.
1151 */
1152
1153int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1154{
1155	unsigned long dflags, cflags;
1156	u8 chk;
1157
1158
1159	spin_lock_irqsave(c->lock, cflags);
1160
1161	c->irqs = &z8530_nop;
1162	c->max = 0;
1163	c->sync = 0;
1164
1165	/*
1166	 *	Disable the PC DMA channels
1167	 */
1168
1169	dflags = claim_dma_lock();
1170
1171	disable_dma(c->txdma);
1172	clear_dma_ff(c->txdma);
1173	c->txdma_on = 0;
1174	c->tx_dma_used = 0;
1175
1176	release_dma_lock(dflags);
1177
1178	/*
1179	 *	Disable DMA control mode
1180	 */
1181
1182	c->regs[R1]&= ~WT_RDY_ENAB;
1183	write_zsreg(c, R1, c->regs[R1]);
1184	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1185	c->regs[R1]|= INT_ALL_Rx;
1186	write_zsreg(c, R1, c->regs[R1]);
1187	c->regs[R14]&= ~DTRREQ;
1188	write_zsreg(c, R14, c->regs[R14]);
1189
1190	if(c->tx_dma_buf[0])
1191	{
1192		free_page((unsigned long)c->tx_dma_buf[0]);
1193		c->tx_dma_buf[0]=NULL;
1194	}
1195	chk=read_zsreg(c,R0);
1196	write_zsreg(c, R3, c->regs[R3]);
1197	z8530_rtsdtr(c,0);
1198
1199	spin_unlock_irqrestore(c->lock, cflags);
1200	return 0;
1201}
1202
1203
1204EXPORT_SYMBOL(z8530_sync_txdma_close);
1205
1206
1207/*
1208 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1209 *	it exists...
1210 */
1211
1212static const char *z8530_type_name[]={
1213	"Z8530",
1214	"Z85C30",
1215	"Z85230"
1216};
1217
1218/**
1219 *	z8530_describe - Uniformly describe a Z8530 port
1220 *	@dev: Z8530 device to describe
1221 *	@mapping: string holding mapping type (eg "I/O" or "Mem")
1222 *	@io: the port value in question
1223 *
 *	Describe a Z8530 in a standard format. We must pass the I/O value as
 *	the port offset isn't predictable. The main reason for this function
 *	is to produce reports in a common format.
1227 */
1228
1229void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1230{
1231	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
1232		dev->name,
1233		z8530_type_name[dev->type],
1234		mapping,
1235		Z8530_PORT_OF(io),
1236		dev->irq);
1237}
1238
1239EXPORT_SYMBOL(z8530_describe);
1240
1241/*
1242 *	Locked operation part of the z8530 init code
1243 */
1244
1245static inline int do_z8530_init(struct z8530_dev *dev)
1246{
1247	/* NOP the interrupt handlers first - we might get a
1248	   floating IRQ transition when we reset the chip */
1249	dev->chanA.irqs=&z8530_nop;
1250	dev->chanB.irqs=&z8530_nop;
1251	dev->chanA.dcdcheck=DCD;
1252	dev->chanB.dcdcheck=DCD;
1253
1254	/* Reset the chip */
1255	write_zsreg(&dev->chanA, R9, 0xC0);
1256	udelay(200);
1257	/* Now check its valid */
1258	write_zsreg(&dev->chanA, R12, 0xAA);
1259	if(read_zsreg(&dev->chanA, R12)!=0xAA)
1260		return -ENODEV;
1261	write_zsreg(&dev->chanA, R12, 0x55);
1262	if(read_zsreg(&dev->chanA, R12)!=0x55)
1263		return -ENODEV;
1264
1265	dev->type=Z8530;
1266
1267	/*
1268	 *	See the application note.
1269	 */
1270
1271	write_zsreg(&dev->chanA, R15, 0x01);
1272
1273	/*
1274	 *	If we can set the low bit of R15 then
1275	 *	the chip is enhanced.
1276	 */
1277
1278	if(read_zsreg(&dev->chanA, R15)==0x01)
1279	{
1280		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1281		/* Put a char in the fifo */
1282		write_zsreg(&dev->chanA, R8, 0);
1283		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1284			dev->type = Z85230;	/* Has a FIFO */
1285		else
1286			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
1287	}
1288
1289	/*
1290	 *	The code assumes R7' and friends are
1291	 *	off. Use write_zsext() for these and keep
1292	 *	this bit clear.
1293	 */
1294
1295	write_zsreg(&dev->chanA, R15, 0);
1296
1297	/*
1298	 *	At this point it looks like the chip is behaving
1299	 */
1300
1301	memcpy(dev->chanA.regs, reg_init, 16);
1302	memcpy(dev->chanB.regs, reg_init ,16);
1303
1304	return 0;
1305}
1306
1307/**
1308 *	z8530_init - Initialise a Z8530 device
1309 *	@dev: Z8530 device to initialise.
1310 *
1311 *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1312 *	is present, identify the type and then program it to hopefully
 *	keep quiet and behave. This matters a lot; a Z8530 in the wrong
 *	state will sometimes get into stupid modes generating 10kHz
 *	interrupt streams and the like.
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setup.
1319 *
1320 *	Return 0 for success, or a negative value indicating the problem
1321 *	in errno form.
1322 */
1323
1324int z8530_init(struct z8530_dev *dev)
1325{
1326	unsigned long flags;
1327	int ret;
1328
1329	/* Set up the chip level lock */
1330	spin_lock_init(&dev->lock);
1331	dev->chanA.lock = &dev->lock;
1332	dev->chanB.lock = &dev->lock;
1333
1334	spin_lock_irqsave(&dev->lock, flags);
1335	ret = do_z8530_init(dev);
1336	spin_unlock_irqrestore(&dev->lock, flags);
1337
1338	return ret;
1339}
1340
1341
1342EXPORT_SYMBOL(z8530_init);
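
/*
 *	Illustrative probe sequence for a board driver (the sv structure,
 *	iobase and error path are hypothetical):
 *
 *		if (z8530_init(&sv->sync)) {
 *			pr_err("Z8530 series device not found\n");
 *			goto fail;
 *		}
 *		z8530_channel_load(&sv->sync.chanB, z8530_dead_port);
 *		z8530_describe(&sv->sync, "I/O", iobase);
 */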
1343
1344/**
1345 *	z8530_shutdown - Shutdown a Z8530 device
1346 *	@dev: The Z8530 chip to shutdown
1347 *
1348 *	We set the interrupt handlers to silence any interrupts. We then
1349 *	reset the chip and wait 100uS to be sure the reset completed. Just
1350 *	in case the caller then tries to do stuff.
1351 *
1352 *	This is called without the lock held
1353 */
1354
1355int z8530_shutdown(struct z8530_dev *dev)
1356{
1357	unsigned long flags;
1358	/* Reset the chip */
1359
1360	spin_lock_irqsave(&dev->lock, flags);
1361	dev->chanA.irqs=&z8530_nop;
1362	dev->chanB.irqs=&z8530_nop;
1363	write_zsreg(&dev->chanA, R9, 0xC0);
1364	/* We must lock the udelay, the chip is offlimits here */
1365	udelay(100);
1366	spin_unlock_irqrestore(&dev->lock, flags);
1367	return 0;
1368}
1369
1370EXPORT_SYMBOL(z8530_shutdown);
1371
1372/**
1373 *	z8530_channel_load - Load channel data
1374 *	@c: Z8530 channel to configure
1375 *	@rtable: table of register, value pairs
1376 *	FIXME: ioctl to allow user uploaded tables
1377 *
1378 *	Load a Z8530 channel up from the system data. We use +16 to
1379 *	indicate the "prime" registers. The value 255 terminates the
1380 *	table.
1381 */
1382
1383int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1384{
1385	unsigned long flags;
1386
1387	spin_lock_irqsave(c->lock, flags);
1388
1389	while(*rtable!=255)
1390	{
1391		int reg=*rtable++;
1392		if(reg>0x0F)
1393			write_zsreg(c, R15, c->regs[15]|1);
1394		write_zsreg(c, reg&0x0F, *rtable);
1395		if(reg>0x0F)
1396			write_zsreg(c, R15, c->regs[15]&~1);
1397		c->regs[reg]=*rtable++;
1398	}
1399	c->rx_function=z8530_null_rx;
1400	c->skb=NULL;
1401	c->tx_skb=NULL;
1402	c->tx_next_skb=NULL;
1403	c->mtu=1500;
1404	c->max=0;
1405	c->count=0;
1406	c->status=read_zsreg(c, R0);
1407	c->sync=1;
1408	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1409
1410	spin_unlock_irqrestore(c->lock, flags);
1411	return 0;
1412}
1413
1414EXPORT_SYMBOL(z8530_channel_load);
1415
1416
1417/**
1418 *	z8530_tx_begin - Begin packet transmission
1419 *	@c: The Z8530 channel to kick
1420 *
1421 *	This is the speed sensitive side of transmission. If we are called
1422 *	and no buffer is being transmitted we commence the next buffer. If
1423 *	nothing is queued we idle the sync.
1424 *
1425 *	Note: We are handling this code path in the interrupt path, keep it
1426 *	fast or bad things will happen.
1427 *
1428 *	Called with the lock held.
1429 */
1430
1431static void z8530_tx_begin(struct z8530_channel *c)
1432{
1433	unsigned long flags;
1434	if(c->tx_skb)
1435		return;
1436
1437	c->tx_skb=c->tx_next_skb;
1438	c->tx_next_skb=NULL;
1439	c->tx_ptr=c->tx_next_ptr;
1440
1441	if(c->tx_skb==NULL)
1442	{
1443		/* Idle on */
1444		if(c->dma_tx)
1445		{
1446			flags=claim_dma_lock();
1447			disable_dma(c->txdma);
1448			/*
1449			 *	Check if we crapped out.
1450			 */
1451			if (get_dma_residue(c->txdma))
1452			{
1453				c->netdevice->stats.tx_dropped++;
1454				c->netdevice->stats.tx_fifo_errors++;
1455			}
1456			release_dma_lock(flags);
1457		}
1458		c->txcount=0;
1459	}
1460	else
1461	{
1462		c->txcount=c->tx_skb->len;
1463
1464
1465		if(c->dma_tx)
1466		{
1467			/*
1468			 *	FIXME. DMA is broken for the original 8530,
1469			 *	on the older parts we need to set a flag and
1470			 *	wait for a further TX interrupt to fire this
1471			 *	stage off
1472			 */
1473
1474			flags=claim_dma_lock();
1475			disable_dma(c->txdma);
1476
1477			/*
1478			 *	These two are needed by the 8530/85C30
1479			 *	and must be issued when idling.
1480			 */
1481
1482			if(c->dev->type!=Z85230)
1483			{
1484				write_zsctrl(c, RES_Tx_CRC);
1485				write_zsctrl(c, RES_EOM_L);
1486			}
1487			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1488			clear_dma_ff(c->txdma);
1489			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1490			set_dma_count(c->txdma, c->txcount);
1491			enable_dma(c->txdma);
1492			release_dma_lock(flags);
1493			write_zsctrl(c, RES_EOM_L);
1494			write_zsreg(c, R5, c->regs[R5]|TxENAB);
1495		}
1496		else
1497		{
1498
1499			/* ABUNDER off */
1500			write_zsreg(c, R10, c->regs[10]);
1501			write_zsctrl(c, RES_Tx_CRC);
1502
1503			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1504			{
1505				write_zsreg(c, R8, *c->tx_ptr++);
1506				c->txcount--;
1507			}
1508
1509		}
1510	}
1511	/*
1512	 *	Since we emptied tx_skb we can ask for more
1513	 */
1514	netif_wake_queue(c->netdevice);
1515}
1516
1517/**
1518 *	z8530_tx_done - TX complete callback
1519 *	@c: The channel that completed a transmit.
1520 *
1521 *	This is called when we complete a packet send. We wake the queue,
1522 *	start the next packet going and then free the buffer of the existing
1523 *	packet. This code is fairly timing sensitive.
1524 *
1525 *	Called with the register lock held.
1526 */
1527
1528static void z8530_tx_done(struct z8530_channel *c)
1529{
1530	struct sk_buff *skb;
1531
	/* Actually this can happen. */
1533	if (c->tx_skb == NULL)
1534		return;
1535
1536	skb = c->tx_skb;
1537	c->tx_skb = NULL;
1538	z8530_tx_begin(c);
1539	c->netdevice->stats.tx_packets++;
1540	c->netdevice->stats.tx_bytes += skb->len;
1541	dev_kfree_skb_irq(skb);
1542}
1543
1544/**
1545 *	z8530_null_rx - Discard a packet
1546 *	@c: The channel the packet arrived on
1547 *	@skb: The buffer
1548 *
1549 *	We point the receive handler at this function when idle. Instead
1550 *	of processing the frames we get to throw them away.
1551 */
1552
1553void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1554{
1555	dev_kfree_skb_any(skb);
1556}
1557
1558EXPORT_SYMBOL(z8530_null_rx);
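
/*
 *	Illustrative sketch: once a link is up a board driver points
 *	c->rx_function at its own handler instead; the two trailing CRC
 *	bytes are dropped first. The generic HDLC style handoff below is an
 *	assumption about the protocol layer in use:
 *
 *		static void my_input(struct z8530_channel *c,
 *				     struct sk_buff *skb)
 *		{
 *			skb_trim(skb, skb->len - 2);
 *			skb->protocol = hdlc_type_trans(skb, c->netdevice);
 *			skb_reset_mac_header(skb);
 *			skb->dev = c->netdevice;
 *			netif_rx(skb);
 *		}
 */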
1559
1560/**
1561 *	z8530_rx_done - Receive completion callback
1562 *	@c: The channel that completed a receive
1563 *
1564 *	A new packet is complete. Our goal here is to get back into receive
1565 *	mode as fast as possible. On the Z85230 we could change to using
1566 *	ESCC mode, but on the older chips we have no choice. We flip to the
1567 *	new buffer immediately in DMA mode so that the DMA of the next
1568 *	frame can occur while we are copying the previous buffer to an sk_buff
1569 *
1570 *	Called with the lock held
1571 */
1572
1573static void z8530_rx_done(struct z8530_channel *c)
1574{
1575	struct sk_buff *skb;
1576	int ct;
1577
1578	/*
1579	 *	Is our receive engine in DMA mode
1580	 */
1581
1582	if(c->rxdma_on)
1583	{
1584		/*
1585		 *	Save the ready state and the buffer currently
1586		 *	being used as the DMA target
1587		 */
1588
1589		int ready=c->dma_ready;
1590		unsigned char *rxb=c->rx_buf[c->dma_num];
1591		unsigned long flags;
1592
1593		/*
1594		 *	Complete this DMA. Necessary to find the length
1595		 */
1596
1597		flags=claim_dma_lock();
1598
1599		disable_dma(c->rxdma);
1600		clear_dma_ff(c->rxdma);
1601		c->rxdma_on=0;
1602		ct=c->mtu-get_dma_residue(c->rxdma);
1603		if(ct<0)
1604			ct=2;	/* Shit happens.. */
1605		c->dma_ready=0;
1606
1607		/*
1608		 *	Normal case: the other slot is free, start the next DMA
1609		 *	into it immediately.
1610		 */
1611
1612		if(ready)
1613		{
1614			c->dma_num^=1;
1615			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1616			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1617			set_dma_count(c->rxdma, c->mtu);
1618			c->rxdma_on = 1;
1619			enable_dma(c->rxdma);
1620			/* Stop any frames that we missed the head of
1621			   from passing */
1622			write_zsreg(c, R0, RES_Rx_CRC);
1623		}
1624		else
		/* Can't occur as we don't re-enable the DMA irq until
		   after the flip is done */
1627			netdev_warn(c->netdevice, "DMA flip overrun!\n");
1628
1629		release_dma_lock(flags);
1630
1631		/*
1632		 *	Shove the old buffer into an sk_buff. We can't DMA
1633		 *	directly into one on a PC - it might be above the 16Mb
1634		 *	boundary. Optimisation - we could check to see if we
1635		 *	can avoid the copy. Optimisation 2 - make the memcpy
1636		 *	a copychecksum.
1637		 */
1638
1639		skb = dev_alloc_skb(ct);
1640		if (skb == NULL) {
1641			c->netdevice->stats.rx_dropped++;
1642			netdev_warn(c->netdevice, "Memory squeeze\n");
1643		} else {
1644			skb_put(skb, ct);
1645			skb_copy_to_linear_data(skb, rxb, ct);
1646			c->netdevice->stats.rx_packets++;
1647			c->netdevice->stats.rx_bytes += ct;
1648		}
1649		c->dma_ready = 1;
1650	} else {
1651		RT_LOCK;
1652		skb = c->skb;
1653
1654		/*
1655		 *	The game we play for non DMA is similar. We want to
1656		 *	get the controller set up for the next packet as fast
1657		 *	as possible. We potentially only have one byte + the
1658		 *	fifo length for this. Thus we want to flip to the new
1659		 *	buffer and then mess around copying and allocating
1660		 *	things. For the current case it doesn't matter but
1661		 *	if you build a system where the sync irq isn't blocked
1662		 *	by the kernel IRQ disable then you need only block the
1663		 *	sync IRQ for the RT_LOCK area.
1664		 *
1665		 */
1666		ct=c->count;
1667
1668		c->skb = c->skb2;
1669		c->count = 0;
1670		c->max = c->mtu;
1671		if (c->skb) {
1672			c->dptr = c->skb->data;
1673			c->max = c->mtu;
1674		} else {
1675			c->count = 0;
1676			c->max = 0;
1677		}
1678		RT_UNLOCK;
1679
1680		c->skb2 = dev_alloc_skb(c->mtu);
1681		if (c->skb2 == NULL)
1682			netdev_warn(c->netdevice, "memory squeeze\n");
1683		else
1684			skb_put(c->skb2, c->mtu);
1685		c->netdevice->stats.rx_packets++;
1686		c->netdevice->stats.rx_bytes += ct;
1687	}
1688	/*
1689	 *	If we received a frame we must now process it.
1690	 */
1691	if (skb) {
1692		skb_trim(skb, ct);
1693		c->rx_function(c, skb);
1694	} else {
1695		c->netdevice->stats.rx_dropped++;
1696		netdev_err(c->netdevice, "Lost a frame\n");
1697	}
1698}
1699
1700/**
1701 *	spans_boundary - Check a packet can be ISA DMA'd
1702 *	@skb: The buffer to check
1703 *
1704 *	Returns true if the buffer cross a DMA boundary on a PC. The poor
1705 *	thing can only DMA within a 64K block not across the edges of it.
1706 */
1707
1708static inline int spans_boundary(struct sk_buff *skb)
1709{
1710	unsigned long a=(unsigned long)skb->data;
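	/* XOR the start address with one past the end: bit 16 differs
	   exactly when the frame crosses a 64K boundary (eg a 1500 byte
	   frame starting at 0x1FD00 ends at 0x202DC and trips the test) */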
1711	a^=(a+skb->len);
1712	if(a&0x00010000)	/* If the 64K bit is different.. */
1713		return 1;
1714	return 0;
1715}
1716
1717/**
1718 *	z8530_queue_xmit - Queue a packet
1719 *	@c: The channel to use
1720 *	@skb: The packet to kick down the channel
1721 *
1722 *	Queue a packet for transmission. Because we have rather
1723 *	hard to hit interrupt latencies for the Z85230 per packet
1724 *	even in DMA mode we do the flip to DMA buffer if needed here
1725 *	not in the IRQ.
1726 *
1727 *	Called from the network code. The lock is not held at this
1728 *	point.
1729 */
1730
1731netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1732{
1733	unsigned long flags;
1734
1735	netif_stop_queue(c->netdevice);
1736	if(c->tx_next_skb)
1737		return NETDEV_TX_BUSY;
1738
1739
1740	/* PC SPECIFIC - DMA limits */
1741
1742	/*
	 *	If we will DMA the transmit and it has gone over the ISA bus
	 *	limit, then copy to the flip buffer
1745	 */
1746
1747	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1748	{
1749		/*
1750		 *	Send the flip buffer, and flip the flippy bit.
1751		 *	We don't care which is used when just so long as
1752		 *	we never use the same buffer twice in a row. Since
1753		 *	only one buffer can be going out at a time the other
1754		 *	has to be safe.
1755		 */
1756		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1757		c->tx_dma_used^=1;	/* Flip temp buffer */
1758		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1759	}
1760	else
1761		c->tx_next_ptr=skb->data;
1762	RT_LOCK;
1763	c->tx_next_skb=skb;
1764	RT_UNLOCK;
1765
1766	spin_lock_irqsave(c->lock, flags);
1767	z8530_tx_begin(c);
1768	spin_unlock_irqrestore(c->lock, flags);
1769
1770	return NETDEV_TX_OK;
1771}
1772
1773EXPORT_SYMBOL(z8530_queue_xmit);
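
/*
 *	Illustrative sketch of the thin ndo_start_xmit wrapper a card driver
 *	would supply (slvl_to_chan() is a hypothetical lookup helper):
 *
 *		static netdev_tx_t my_xmit(struct sk_buff *skb,
 *					   struct net_device *d)
 *		{
 *			return z8530_queue_xmit(slvl_to_chan(d), skb);
 *		}
 */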
1774
1775/*
1776 *	Module support
1777 */
1778static const char banner[] __initconst =
1779	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1780
1781static int __init z85230_init_driver(void)
1782{
1783	printk(banner);
1784	return 0;
1785}
1786module_init(z85230_init_driver);
1787
1788static void __exit z85230_cleanup_driver(void)
1789{
1790}
1791module_exit(z85230_cleanup_driver);
1792
1793MODULE_AUTHOR("Red Hat Inc.");
1794MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1795MODULE_LICENSE("GPL");
1796