1/*
2
3  he.c
4
5  ForeRunnerHE ATM Adapter driver for ATM on Linux
6  Copyright (C) 1999-2001  Naval Research Laboratory
7
8  This library is free software; you can redistribute it and/or
9  modify it under the terms of the GNU Lesser General Public
10  License as published by the Free Software Foundation; either
11  version 2.1 of the License, or (at your option) any later version.
12
13  This library is distributed in the hope that it will be useful,
14  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  Lesser General Public License for more details.
17
18  You should have received a copy of the GNU Lesser General Public
19  License along with this library; if not, write to the Free Software
20  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21
22*/
23
24/*
25
26  he.c
27
28  ForeRunnerHE ATM Adapter driver for ATM on Linux
29  Copyright (C) 1999-2001  Naval Research Laboratory
30
31  Permission to use, copy, modify and distribute this software and its
32  documentation is hereby granted, provided that both the copyright
33  notice and this permission notice appear in all copies of the software,
34  derivative works or modified versions, and any portions thereof, and
35  that both notices appear in supporting documentation.
36
37  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39  RESULTING FROM THE USE OF THIS SOFTWARE.
40
41  This driver was written using the "Programmer's Reference Manual for
42  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44  AUTHORS:
45	chas williams <chas@cmf.nrl.navy.mil>
46	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48  NOTES:
49	4096 supported 'connections'
50	group 0 is used for all traffic
51	interrupt queue 0 is used for all interrupts
52	aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54 */
55
56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/skbuff.h>
59#include <linux/pci.h>
60#include <linux/errno.h>
61#include <linux/types.h>
62#include <linux/string.h>
63#include <linux/delay.h>
64#include <linux/init.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/dma-mapping.h>
70#include <linux/bitmap.h>
71#include <linux/slab.h>
72#include <asm/io.h>
73#include <asm/byteorder.h>
74#include <asm/uaccess.h>
75
76#include <linux/atmdev.h>
77#include <linux/atm.h>
78#include <linux/sonet.h>
79
80#undef USE_SCATTERGATHER
81#undef USE_CHECKSUM_HW			/* still confused about this */
82/* #undef HE_DEBUG */
83
84#include "he.h"
85#include "suni.h"
86#include <linux/atm_he.h>
87
88#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89
90#ifdef HE_DEBUG
91#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92#else /* !HE_DEBUG */
93#define HPRINTK(fmt,args...)	do { } while (0)
94#endif /* HE_DEBUG */
95
96/* declarations */
97
98static int he_open(struct atm_vcc *vcc);
99static void he_close(struct atm_vcc *vcc);
100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102static irqreturn_t he_irq_handler(int irq, void *dev_id);
103static void he_tasklet(unsigned long data);
104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105static int he_start(struct atm_dev *dev);
106static void he_stop(struct he_dev *dev);
107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109
110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111
112/* globals */
113
114static struct he_dev *he_devs;
115static bool disable64;
116static short nvpibits = -1;
117static short nvcibits = -1;
118static short rx_skb_reserve = 16;
119static bool irq_coalesce = 1;
120static bool sdh = 0;
121
122/* Read from EEPROM = 0000 0011b */
123static unsigned int readtab[] = {
124	CS_HIGH | CLK_HIGH,
125	CS_LOW | CLK_LOW,
126	CLK_HIGH,               /* 0 */
127	CLK_LOW,
128	CLK_HIGH,               /* 0 */
129	CLK_LOW,
130	CLK_HIGH,               /* 0 */
131	CLK_LOW,
132	CLK_HIGH,               /* 0 */
133	CLK_LOW,
134	CLK_HIGH,               /* 0 */
135	CLK_LOW,
136	CLK_HIGH,               /* 0 */
137	CLK_LOW | SI_HIGH,
138	CLK_HIGH | SI_HIGH,     /* 1 */
139	CLK_LOW | SI_HIGH,
140	CLK_HIGH | SI_HIGH      /* 1 */
141};
142
143/* Clock to read from/write to the EEPROM */
144static unsigned int clocktab[] = {
145	CLK_LOW,
146	CLK_HIGH,
147	CLK_LOW,
148	CLK_HIGH,
149	CLK_LOW,
150	CLK_HIGH,
151	CLK_LOW,
152	CLK_HIGH,
153	CLK_LOW,
154	CLK_HIGH,
155	CLK_LOW,
156	CLK_HIGH,
157	CLK_LOW,
158	CLK_HIGH,
159	CLK_LOW,
160	CLK_HIGH,
161	CLK_LOW
162};
163
164static struct atmdev_ops he_ops =
165{
166	.open =		he_open,
167	.close =	he_close,
168	.ioctl =	he_ioctl,
169	.send =		he_send,
170	.phy_put =	he_phy_put,
171	.phy_get =	he_phy_get,
172	.proc_read =	he_proc_read,
173	.owner =	THIS_MODULE
174};
175
176#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177#define he_readl(dev, reg)		readl((dev)->membase + (reg))
178
179/* section 2.12 connection memory access */
180
181static __inline__ void
182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183								unsigned flags)
184{
185	he_writel(he_dev, val, CON_DAT);
186	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
187	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189}
190
191#define he_writel_rcm(dev, val, reg) 				\
192			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193
194#define he_writel_tcm(dev, val, reg) 				\
195			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196
197#define he_writel_mbox(dev, val, reg) 				\
198			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199
200static unsigned
201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202{
203	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205	return he_readl(he_dev, CON_DAT);
206}
207
208#define he_readl_rcm(dev, reg) \
209			he_readl_internal(dev, reg, CON_CTL_RCM)
210
211#define he_readl_tcm(dev, reg) \
212			he_readl_internal(dev, reg, CON_CTL_TCM)
213
214#define he_readl_mbox(dev, reg) \
215			he_readl_internal(dev, reg, CON_CTL_MBOX)
216
217
218/* figure 2.2 connection id */
219
220#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
221
222/* 2.5.1 per connection transmit state registers */
223
224#define he_writel_tsr0(dev, val, cid) \
225		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226#define he_readl_tsr0(dev, cid) \
227		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228
229#define he_writel_tsr1(dev, val, cid) \
230		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231
232#define he_writel_tsr2(dev, val, cid) \
233		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234
235#define he_writel_tsr3(dev, val, cid) \
236		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237
238#define he_writel_tsr4(dev, val, cid) \
239		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240
241	/* from page 2-20
242	 *
243	 * NOTE While the transmit connection is active, bits 23 through 0
244	 *      of this register must not be written by the host.  Byte
245	 *      enables should be used during normal operation when writing
246	 *      the most significant byte.
247	 */
248
249#define he_writel_tsr4_upper(dev, val, cid) \
250		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251							CON_CTL_TCM \
252							| CON_BYTE_DISABLE_2 \
253							| CON_BYTE_DISABLE_1 \
254							| CON_BYTE_DISABLE_0)
255
256#define he_readl_tsr4(dev, cid) \
257		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258
259#define he_writel_tsr5(dev, val, cid) \
260		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261
262#define he_writel_tsr6(dev, val, cid) \
263		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264
265#define he_writel_tsr7(dev, val, cid) \
266		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267
268
269#define he_writel_tsr8(dev, val, cid) \
270		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271
272#define he_writel_tsr9(dev, val, cid) \
273		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274
275#define he_writel_tsr10(dev, val, cid) \
276		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277
278#define he_writel_tsr11(dev, val, cid) \
279		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280
281
282#define he_writel_tsr12(dev, val, cid) \
283		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284
285#define he_writel_tsr13(dev, val, cid) \
286		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287
288
289#define he_writel_tsr14(dev, val, cid) \
290		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291
292#define he_writel_tsr14_upper(dev, val, cid) \
293		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294							CON_CTL_TCM \
295							| CON_BYTE_DISABLE_2 \
296							| CON_BYTE_DISABLE_1 \
297							| CON_BYTE_DISABLE_0)
298
299/* 2.7.1 per connection receive state registers */
300
301#define he_writel_rsr0(dev, val, cid) \
302		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303#define he_readl_rsr0(dev, cid) \
304		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305
306#define he_writel_rsr1(dev, val, cid) \
307		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308
309#define he_writel_rsr2(dev, val, cid) \
310		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311
312#define he_writel_rsr3(dev, val, cid) \
313		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314
315#define he_writel_rsr4(dev, val, cid) \
316		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317
318#define he_writel_rsr5(dev, val, cid) \
319		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320
321#define he_writel_rsr6(dev, val, cid) \
322		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323
324#define he_writel_rsr7(dev, val, cid) \
325		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326
327static __inline__ struct atm_vcc*
328__find_vcc(struct he_dev *he_dev, unsigned cid)
329{
330	struct hlist_head *head;
331	struct atm_vcc *vcc;
332	struct sock *s;
333	short vpi;
334	int vci;
335
336	vpi = cid >> he_dev->vcibits;
337	vci = cid & ((1 << he_dev->vcibits) - 1);
338	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339
340	sk_for_each(s, head) {
341		vcc = atm_sk(s);
342		if (vcc->dev == he_dev->atm_dev &&
343		    vcc->vci == vci && vcc->vpi == vpi &&
344		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345				return vcc;
346		}
347	}
348	return NULL;
349}
350
351static int he_init_one(struct pci_dev *pci_dev,
352		       const struct pci_device_id *pci_ent)
353{
354	struct atm_dev *atm_dev = NULL;
355	struct he_dev *he_dev = NULL;
356	int err = 0;
357
358	printk(KERN_INFO "ATM he driver\n");
359
360	if (pci_enable_device(pci_dev))
361		return -EIO;
362	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363		printk(KERN_WARNING "he: no suitable dma available\n");
364		err = -EIO;
365		goto init_one_failure;
366	}
367
368	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369	if (!atm_dev) {
370		err = -ENODEV;
371		goto init_one_failure;
372	}
373	pci_set_drvdata(pci_dev, atm_dev);
374
375	he_dev = kzalloc(sizeof(struct he_dev),
376							GFP_KERNEL);
377	if (!he_dev) {
378		err = -ENOMEM;
379		goto init_one_failure;
380	}
381	he_dev->pci_dev = pci_dev;
382	he_dev->atm_dev = atm_dev;
383	he_dev->atm_dev->dev_data = he_dev;
384	atm_dev->dev_data = he_dev;
385	he_dev->number = atm_dev->number;
386	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387	spin_lock_init(&he_dev->global_lock);
388
389	if (he_start(atm_dev)) {
390		he_stop(he_dev);
391		err = -ENODEV;
392		goto init_one_failure;
393	}
394	he_dev->next = NULL;
395	if (he_devs)
396		he_dev->next = he_devs;
397	he_devs = he_dev;
398	return 0;
399
400init_one_failure:
401	if (atm_dev)
402		atm_dev_deregister(atm_dev);
403	kfree(he_dev);
404	pci_disable_device(pci_dev);
405	return err;
406}
407
408static void he_remove_one(struct pci_dev *pci_dev)
409{
410	struct atm_dev *atm_dev;
411	struct he_dev *he_dev;
412
413	atm_dev = pci_get_drvdata(pci_dev);
414	he_dev = HE_DEV(atm_dev);
415
416	/* need to remove from he_devs */
417
418	he_stop(he_dev);
419	atm_dev_deregister(atm_dev);
420	kfree(he_dev);
421
422	pci_disable_device(pci_dev);
423}
424
425
426static unsigned
427rate_to_atmf(unsigned rate)		/* cps to atm forum format */
428{
429#define NONZERO (1 << 14)
430
431	unsigned exp = 0;
432
433	if (rate == 0)
434		return 0;
435
436	rate <<= 9;
437	while (rate > 0x3ff) {
438		++exp;
439		rate >>= 1;
440	}
441
442	return (NONZERO | (exp << 9) | (rate & 0x1ff));
443}
444
445static void he_init_rx_lbfp0(struct he_dev *he_dev)
446{
447	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451
452	lbufd_index = 0;
453	lbm_offset = he_readl(he_dev, RCMLBM_BA);
454
455	he_writel(he_dev, lbufd_index, RLBF0_H);
456
457	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
458		lbufd_index += 2;
459		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
460
461		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
463
464		if (++lbuf_count == lbufs_per_row) {
465			lbuf_count = 0;
466			row_offset += he_dev->bytes_per_row;
467		}
468		lbm_offset += 4;
469	}
470
471	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
473}
474
475static void he_init_rx_lbfp1(struct he_dev *he_dev)
476{
477	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
481
482	lbufd_index = 1;
483	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484
485	he_writel(he_dev, lbufd_index, RLBF1_H);
486
487	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488		lbufd_index += 2;
489		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490
491		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493
494		if (++lbuf_count == lbufs_per_row) {
495			lbuf_count = 0;
496			row_offset += he_dev->bytes_per_row;
497		}
498		lbm_offset += 4;
499	}
500
501	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
503}
504
505static void he_init_tx_lbfp(struct he_dev *he_dev)
506{
507	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511
512	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514
515	he_writel(he_dev, lbufd_index, TLBF_H);
516
517	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518		lbufd_index += 1;
519		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520
521		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523
524		if (++lbuf_count == lbufs_per_row) {
525			lbuf_count = 0;
526			row_offset += he_dev->bytes_per_row;
527		}
528		lbm_offset += 2;
529	}
530
531	he_writel(he_dev, lbufd_index - 1, TLBF_T);
532}
533
534static int he_init_tpdrq(struct he_dev *he_dev)
535{
536	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
537						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538						 &he_dev->tpdrq_phys, GFP_KERNEL);
539	if (he_dev->tpdrq_base == NULL) {
540		hprintk("failed to alloc tpdrq\n");
541		return -ENOMEM;
542	}
543
544	he_dev->tpdrq_tail = he_dev->tpdrq_base;
545	he_dev->tpdrq_head = he_dev->tpdrq_base;
546
547	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548	he_writel(he_dev, 0, TPDRQ_T);
549	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
550
551	return 0;
552}
553
554static void he_init_cs_block(struct he_dev *he_dev)
555{
556	unsigned clock, rate, delta;
557	int reg;
558
559	/* 5.1.7 cs block initialization */
560
561	for (reg = 0; reg < 0x20; ++reg)
562		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
563
564	/* rate grid timer reload values */
565
566	clock = he_is622(he_dev) ? 66667000 : 50000000;
567	rate = he_dev->atm_dev->link_rate;
568	delta = rate / 16 / 2;
569
570	for (reg = 0; reg < 0x10; ++reg) {
571		/* 2.4 internal transmit function
572		 *
573	 	 * we initialize the first row in the rate grid.
574		 * values are period (in clock cycles) of timer
575		 */
576		unsigned period = clock / rate;
577
578		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
579		rate -= delta;
580	}
581
582	if (he_is622(he_dev)) {
583		/* table 5.2 (4 cells per lbuf) */
584		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
585		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
586		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
587		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
588		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
589
590		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
591		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
592		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
593		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
594		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
595		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
596		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
597
598		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
599
600		/* table 5.8 */
601		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
602		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
603		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
604		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
605		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
606		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
607
608		/* table 5.9 */
609		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
610		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
611	} else {
612		/* table 5.1 (4 cells per lbuf) */
613		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
614		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
615		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
616		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
617		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
618
619		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
620		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
621		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
622		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
623		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
624		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
625		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
626
627		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
628
629		/* table 5.8 */
630		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
631		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
632		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
633		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
634		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
635		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
636
637		/* table 5.9 */
638		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
639		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
640	}
641
642	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
643
644	for (reg = 0; reg < 0x8; ++reg)
645		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
646
647}
648
649static int he_init_cs_block_rcm(struct he_dev *he_dev)
650{
651	unsigned (*rategrid)[16][16];
652	unsigned rate, delta;
653	int i, j, reg;
654
655	unsigned rate_atmf, exp, man;
656	unsigned long long rate_cps;
657	int mult, buf, buf_limit = 4;
658
659	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
660	if (!rategrid)
661		return -ENOMEM;
662
663	/* initialize rate grid group table */
664
665	for (reg = 0x0; reg < 0xff; ++reg)
666		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
667
668	/* initialize rate controller groups */
669
670	for (reg = 0x100; reg < 0x1ff; ++reg)
671		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672
673	/* initialize tNrm lookup table */
674
675	/* the manual makes reference to a routine in a sample driver
676	   for proper configuration; fortunately, we only need this
677	   in order to support abr connection */
678
679	/* initialize rate to group table */
680
681	rate = he_dev->atm_dev->link_rate;
682	delta = rate / 32;
683
684	/*
685	 * 2.4 transmit internal functions
686	 *
687	 * we construct a copy of the rate grid used by the scheduler
688	 * in order to construct the rate to group table below
689	 */
690
691	for (j = 0; j < 16; j++) {
692		(*rategrid)[0][j] = rate;
693		rate -= delta;
694	}
695
696	for (i = 1; i < 16; i++)
697		for (j = 0; j < 16; j++)
698			if (i > 14)
699				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
700			else
701				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
702
703	/*
704	 * 2.4 transmit internal function
705	 *
706	 * this table maps the upper 5 bits of exponent and mantissa
707	 * of the atm forum representation of the rate into an index
708	 * on rate grid
709	 */
710
711	rate_atmf = 0;
712	while (rate_atmf < 0x400) {
713		man = (rate_atmf & 0x1f) << 4;
714		exp = rate_atmf >> 5;
715
716		/*
717			instead of '/ 512', use '>> 9' to prevent a call
718			to divdu3 on x86 platforms
719		*/
720		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
721
722		if (rate_cps < 10)
723			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
724
725		for (i = 255; i > 0; i--)
726			if ((*rategrid)[i/16][i%16] >= rate_cps)
727				break;	 /* pick nearest rate instead? */
728
729		/*
730		 * each table entry is 16 bits: (rate grid index (8 bits)
731		 * and a buffer limit (8 bits)
732		 * there are two table entries in each 32-bit register
733		 */
734
735#ifdef notdef
736		buf = rate_cps * he_dev->tx_numbuffs /
737				(he_dev->atm_dev->link_rate * 2);
738#else
739		/* this is pretty, but avoids _divdu3 and is mostly correct */
740		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741		if (rate_cps > (272 * mult))
742			buf = 4;
743		else if (rate_cps > (204 * mult))
744			buf = 3;
745		else if (rate_cps > (136 * mult))
746			buf = 2;
747		else if (rate_cps > (68 * mult))
748			buf = 1;
749		else
750			buf = 0;
751#endif
752		if (buf > buf_limit)
753			buf = buf_limit;
754		reg = (reg << 16) | ((i << 8) | buf);
755
756#define RTGTBL_OFFSET 0x400
757
758		if (rate_atmf & 0x1)
759			he_writel_rcm(he_dev, reg,
760				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
761
762		++rate_atmf;
763	}
764
765	kfree(rategrid);
766	return 0;
767}
768
769static int he_init_group(struct he_dev *he_dev, int group)
770{
771	struct he_buff *heb, *next;
772	dma_addr_t mapping;
773	int i;
774
775	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
776	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
777	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
778	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
779		  G0_RBPS_BS + (group * 32));
780
781	/* bitmap table */
782	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
783				     * sizeof(unsigned long), GFP_KERNEL);
784	if (!he_dev->rbpl_table) {
785		hprintk("unable to allocate rbpl bitmap table\n");
786		return -ENOMEM;
787	}
788	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
789
790	/* rbpl_virt 64-bit pointers */
791	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
792				    * sizeof(struct he_buff *), GFP_KERNEL);
793	if (!he_dev->rbpl_virt) {
794		hprintk("unable to allocate rbpl virt table\n");
795		goto out_free_rbpl_table;
796	}
797
798	/* large buffer pool */
799	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
800					    CONFIG_RBPL_BUFSIZE, 64, 0);
801	if (he_dev->rbpl_pool == NULL) {
802		hprintk("unable to create rbpl pool\n");
803		goto out_free_rbpl_virt;
804	}
805
806	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
807						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
808						&he_dev->rbpl_phys, GFP_KERNEL);
809	if (he_dev->rbpl_base == NULL) {
810		hprintk("failed to alloc rbpl_base\n");
811		goto out_destroy_rbpl_pool;
812	}
813
814	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
815
816	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
817
818		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
819		if (!heb)
820			goto out_free_rbpl;
821		heb->mapping = mapping;
822		list_add(&heb->entry, &he_dev->rbpl_outstanding);
823
824		set_bit(i, he_dev->rbpl_table);
825		he_dev->rbpl_virt[i] = heb;
826		he_dev->rbpl_hint = i + 1;
827		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
828		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
829	}
830	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
831
832	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
833	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
834						G0_RBPL_T + (group * 32));
835	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
836						G0_RBPL_BS + (group * 32));
837	he_writel(he_dev,
838			RBP_THRESH(CONFIG_RBPL_THRESH) |
839			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
840			RBP_INT_ENB,
841						G0_RBPL_QI + (group * 32));
842
843	/* rx buffer ready queue */
844
845	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
846						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
847						&he_dev->rbrq_phys, GFP_KERNEL);
848	if (he_dev->rbrq_base == NULL) {
849		hprintk("failed to allocate rbrq\n");
850		goto out_free_rbpl;
851	}
852
853	he_dev->rbrq_head = he_dev->rbrq_base;
854	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
855	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
856	he_writel(he_dev,
857		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
858						G0_RBRQ_Q + (group * 16));
859	if (irq_coalesce) {
860		hprintk("coalescing interrupts\n");
861		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
862						G0_RBRQ_I + (group * 16));
863	} else
864		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
865						G0_RBRQ_I + (group * 16));
866
867	/* tx buffer ready queue */
868
869	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
870						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
871						&he_dev->tbrq_phys, GFP_KERNEL);
872	if (he_dev->tbrq_base == NULL) {
873		hprintk("failed to allocate tbrq\n");
874		goto out_free_rbpq_base;
875	}
876
877	he_dev->tbrq_head = he_dev->tbrq_base;
878
879	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
880	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
881	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
882	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
883
884	return 0;
885
886out_free_rbpq_base:
887	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
888			  sizeof(struct he_rbrq), he_dev->rbrq_base,
889			  he_dev->rbrq_phys);
890out_free_rbpl:
891	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
892		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
893
894	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
895			  sizeof(struct he_rbp), he_dev->rbpl_base,
896			  he_dev->rbpl_phys);
897out_destroy_rbpl_pool:
898	dma_pool_destroy(he_dev->rbpl_pool);
899out_free_rbpl_virt:
900	kfree(he_dev->rbpl_virt);
901out_free_rbpl_table:
902	kfree(he_dev->rbpl_table);
903
904	return -ENOMEM;
905}
906
907static int he_init_irq(struct he_dev *he_dev)
908{
909	int i;
910
911	/* 2.9.3.5  tail offset for each interrupt queue is located after the
912		    end of the interrupt queue */
913
914	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
915					       (CONFIG_IRQ_SIZE + 1)
916					       * sizeof(struct he_irq),
917					       &he_dev->irq_phys,
918					       GFP_KERNEL);
919	if (he_dev->irq_base == NULL) {
920		hprintk("failed to allocate irq\n");
921		return -ENOMEM;
922	}
923	he_dev->irq_tailoffset = (unsigned *)
924					&he_dev->irq_base[CONFIG_IRQ_SIZE];
925	*he_dev->irq_tailoffset = 0;
926	he_dev->irq_head = he_dev->irq_base;
927	he_dev->irq_tail = he_dev->irq_base;
928
929	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
930		he_dev->irq_base[i].isw = ITYPE_INVALID;
931
932	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
933	he_writel(he_dev,
934		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
935								IRQ0_HEAD);
936	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
937	he_writel(he_dev, 0x0, IRQ0_DATA);
938
939	he_writel(he_dev, 0x0, IRQ1_BASE);
940	he_writel(he_dev, 0x0, IRQ1_HEAD);
941	he_writel(he_dev, 0x0, IRQ1_CNTL);
942	he_writel(he_dev, 0x0, IRQ1_DATA);
943
944	he_writel(he_dev, 0x0, IRQ2_BASE);
945	he_writel(he_dev, 0x0, IRQ2_HEAD);
946	he_writel(he_dev, 0x0, IRQ2_CNTL);
947	he_writel(he_dev, 0x0, IRQ2_DATA);
948
949	he_writel(he_dev, 0x0, IRQ3_BASE);
950	he_writel(he_dev, 0x0, IRQ3_HEAD);
951	he_writel(he_dev, 0x0, IRQ3_CNTL);
952	he_writel(he_dev, 0x0, IRQ3_DATA);
953
954	/* 2.9.3.2 interrupt queue mapping registers */
955
956	he_writel(he_dev, 0x0, GRP_10_MAP);
957	he_writel(he_dev, 0x0, GRP_32_MAP);
958	he_writel(he_dev, 0x0, GRP_54_MAP);
959	he_writel(he_dev, 0x0, GRP_76_MAP);
960
961	if (request_irq(he_dev->pci_dev->irq,
962			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
963		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
964		return -EINVAL;
965	}
966
967	he_dev->irq = he_dev->pci_dev->irq;
968
969	return 0;
970}
971
972static int he_start(struct atm_dev *dev)
973{
974	struct he_dev *he_dev;
975	struct pci_dev *pci_dev;
976	unsigned long membase;
977
978	u16 command;
979	u32 gen_cntl_0, host_cntl, lb_swap;
980	u8 cache_size, timer;
981
982	unsigned err;
983	unsigned int status, reg;
984	int i, group;
985
986	he_dev = HE_DEV(dev);
987	pci_dev = he_dev->pci_dev;
988
989	membase = pci_resource_start(pci_dev, 0);
990	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
991
992	/*
993	 * pci bus controller initialization
994	 */
995
996	/* 4.3 pci bus controller-specific initialization */
997	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
998		hprintk("can't read GEN_CNTL_0\n");
999		return -EINVAL;
1000	}
1001	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1002	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1003		hprintk("can't write GEN_CNTL_0.\n");
1004		return -EINVAL;
1005	}
1006
1007	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1008		hprintk("can't read PCI_COMMAND.\n");
1009		return -EINVAL;
1010	}
1011
1012	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1013	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1014		hprintk("can't enable memory.\n");
1015		return -EINVAL;
1016	}
1017
1018	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1019		hprintk("can't read cache line size?\n");
1020		return -EINVAL;
1021	}
1022
1023	if (cache_size < 16) {
1024		cache_size = 16;
1025		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1026			hprintk("can't set cache line size to %d\n", cache_size);
1027	}
1028
1029	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1030		hprintk("can't read latency timer?\n");
1031		return -EINVAL;
1032	}
1033
1034	/* from table 3.9
1035	 *
1036	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1037	 *
1038	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1039	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1040	 *
1041	 */
1042#define LAT_TIMER 209
1043	if (timer < LAT_TIMER) {
1044		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1045		timer = LAT_TIMER;
1046		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1047			hprintk("can't set latency timer to %d\n", timer);
1048	}
1049
1050	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1051		hprintk("can't set up page mapping\n");
1052		return -EINVAL;
1053	}
1054
1055	/* 4.4 card reset */
1056	he_writel(he_dev, 0x0, RESET_CNTL);
1057	he_writel(he_dev, 0xff, RESET_CNTL);
1058
1059	msleep(16);	/* 16 ms */
1060	status = he_readl(he_dev, RESET_CNTL);
1061	if ((status & BOARD_RST_STATUS) == 0) {
1062		hprintk("reset failed\n");
1063		return -EINVAL;
1064	}
1065
1066	/* 4.5 set bus width */
1067	host_cntl = he_readl(he_dev, HOST_CNTL);
1068	if (host_cntl & PCI_BUS_SIZE64)
1069		gen_cntl_0 |= ENBL_64;
1070	else
1071		gen_cntl_0 &= ~ENBL_64;
1072
1073	if (disable64 == 1) {
1074		hprintk("disabling 64-bit pci bus transfers\n");
1075		gen_cntl_0 &= ~ENBL_64;
1076	}
1077
1078	if (gen_cntl_0 & ENBL_64)
1079		hprintk("64-bit transfers enabled\n");
1080
1081	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1082
1083	/* 4.7 read prom contents */
1084	for (i = 0; i < PROD_ID_LEN; ++i)
1085		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1086
1087	he_dev->media = read_prom_byte(he_dev, MEDIA);
1088
1089	for (i = 0; i < 6; ++i)
1090		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1091
1092	hprintk("%s%s, %pM\n", he_dev->prod_id,
1093		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1094	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1095						ATM_OC12_PCR : ATM_OC3_PCR;
1096
1097	/* 4.6 set host endianess */
1098	lb_swap = he_readl(he_dev, LB_SWAP);
1099	if (he_is622(he_dev))
1100		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1101	else
1102		lb_swap |= XFER_SIZE;		/* 8 cells */
1103#ifdef __BIG_ENDIAN
1104	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1105#else
1106	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1107			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1108#endif /* __BIG_ENDIAN */
1109	he_writel(he_dev, lb_swap, LB_SWAP);
1110
1111	/* 4.8 sdram controller initialization */
1112	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1113
1114	/* 4.9 initialize rnum value */
1115	lb_swap |= SWAP_RNUM_MAX(0xf);
1116	he_writel(he_dev, lb_swap, LB_SWAP);
1117
1118	/* 4.10 initialize the interrupt queues */
1119	if ((err = he_init_irq(he_dev)) != 0)
1120		return err;
1121
1122	/* 4.11 enable pci bus controller state machines */
1123	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1124				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1125	he_writel(he_dev, host_cntl, HOST_CNTL);
1126
1127	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1128	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1129
1130	/*
1131	 * atm network controller initialization
1132	 */
1133
1134	/* 5.1.1 generic configuration state */
1135
1136	/*
1137	 *		local (cell) buffer memory map
1138	 *
1139	 *             HE155                          HE622
1140	 *
1141	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1142	 *         |            |            |                   |   |
1143	 *         |  utility   |            |        rx0        |   |
1144	 *        5|____________|         255|___________________| u |
1145	 *        6|            |         256|                   | t |
1146	 *         |            |            |                   | i |
1147	 *         |    rx0     |     row    |        tx         | l |
1148	 *         |            |            |                   | i |
1149	 *         |            |         767|___________________| t |
1150	 *      517|____________|         768|                   | y |
1151	 * row  518|            |            |        rx1        |   |
1152	 *         |            |        1023|___________________|___|
1153	 *         |            |
1154	 *         |    tx      |
1155	 *         |            |
1156	 *         |            |
1157	 *     1535|____________|
1158	 *     1536|            |
1159	 *         |    rx1     |
1160	 *     2047|____________|
1161	 *
1162	 */
1163
1164	/* total 4096 connections */
1165	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1166	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1167
1168	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1169		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1170		return -ENODEV;
1171	}
1172
1173	if (nvpibits != -1) {
1174		he_dev->vpibits = nvpibits;
1175		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1176	}
1177
1178	if (nvcibits != -1) {
1179		he_dev->vcibits = nvcibits;
1180		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1181	}
1182
1183
1184	if (he_is622(he_dev)) {
1185		he_dev->cells_per_row = 40;
1186		he_dev->bytes_per_row = 2048;
1187		he_dev->r0_numrows = 256;
1188		he_dev->tx_numrows = 512;
1189		he_dev->r1_numrows = 256;
1190		he_dev->r0_startrow = 0;
1191		he_dev->tx_startrow = 256;
1192		he_dev->r1_startrow = 768;
1193	} else {
1194		he_dev->cells_per_row = 20;
1195		he_dev->bytes_per_row = 1024;
1196		he_dev->r0_numrows = 512;
1197		he_dev->tx_numrows = 1018;
1198		he_dev->r1_numrows = 512;
1199		he_dev->r0_startrow = 6;
1200		he_dev->tx_startrow = 518;
1201		he_dev->r1_startrow = 1536;
1202	}
1203
1204	he_dev->cells_per_lbuf = 4;
1205	he_dev->buffer_limit = 4;
1206	he_dev->r0_numbuffs = he_dev->r0_numrows *
1207				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1208	if (he_dev->r0_numbuffs > 2560)
1209		he_dev->r0_numbuffs = 2560;
1210
1211	he_dev->r1_numbuffs = he_dev->r1_numrows *
1212				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1213	if (he_dev->r1_numbuffs > 2560)
1214		he_dev->r1_numbuffs = 2560;
1215
1216	he_dev->tx_numbuffs = he_dev->tx_numrows *
1217				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1218	if (he_dev->tx_numbuffs > 5120)
1219		he_dev->tx_numbuffs = 5120;
1220
1221	/* 5.1.2 configure hardware dependent registers */
1222
1223	he_writel(he_dev,
1224		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1225		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1226		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1227		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1228								LBARB);
1229
1230	he_writel(he_dev, BANK_ON |
1231		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1232								SDRAMCON);
1233
1234	he_writel(he_dev,
1235		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1236						RM_RW_WAIT(1), RCMCONFIG);
1237	he_writel(he_dev,
1238		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1239						TM_RW_WAIT(1), TCMCONFIG);
1240
1241	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1242
1243	he_writel(he_dev,
1244		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1245		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1246		RX_VALVP(he_dev->vpibits) |
1247		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1248
1249	he_writel(he_dev, DRF_THRESH(0x20) |
1250		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1251		TX_VCI_MASK(he_dev->vcibits) |
1252		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1253
1254	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1255
1256	he_writel(he_dev, PHY_INT_ENB |
1257		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1258								RH_CONFIG);
1259
1260	/* 5.1.3 initialize connection memory */
1261
1262	for (i = 0; i < TCM_MEM_SIZE; ++i)
1263		he_writel_tcm(he_dev, 0, i);
1264
1265	for (i = 0; i < RCM_MEM_SIZE; ++i)
1266		he_writel_rcm(he_dev, 0, i);
1267
1268	/*
1269	 *	transmit connection memory map
1270	 *
1271	 *                  tx memory
1272	 *          0x0 ___________________
1273	 *             |                   |
1274	 *             |                   |
1275	 *             |       TSRa        |
1276	 *             |                   |
1277	 *             |                   |
1278	 *       0x8000|___________________|
1279	 *             |                   |
1280	 *             |       TSRb        |
1281	 *       0xc000|___________________|
1282	 *             |                   |
1283	 *             |       TSRc        |
1284	 *       0xe000|___________________|
1285	 *             |       TSRd        |
1286	 *       0xf000|___________________|
1287	 *             |       tmABR       |
1288	 *      0x10000|___________________|
1289	 *             |                   |
1290	 *             |       tmTPD       |
1291	 *             |___________________|
1292	 *             |                   |
1293	 *                      ....
1294	 *      0x1ffff|___________________|
1295	 *
1296	 *
1297	 */
1298
1299	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1300	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1301	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1302	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1303	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1304
1305
1306	/*
1307	 *	receive connection memory map
1308	 *
1309	 *          0x0 ___________________
1310	 *             |                   |
1311	 *             |                   |
1312	 *             |       RSRa        |
1313	 *             |                   |
1314	 *             |                   |
1315	 *       0x8000|___________________|
1316	 *             |                   |
1317	 *             |             rx0/1 |
1318	 *             |       LBM         |   link lists of local
1319	 *             |             tx    |   buffer memory
1320	 *             |                   |
1321	 *       0xd000|___________________|
1322	 *             |                   |
1323	 *             |      rmABR        |
1324	 *       0xe000|___________________|
1325	 *             |                   |
1326	 *             |       RSRb        |
1327	 *             |___________________|
1328	 *             |                   |
1329	 *                      ....
1330	 *       0xffff|___________________|
1331	 */
1332
1333	he_writel(he_dev, 0x08000, RCMLBM_BA);
1334	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1335	he_writel(he_dev, 0x0d800, RCMABR_BA);
1336
1337	/* 5.1.4 initialize local buffer free pools linked lists */
1338
1339	he_init_rx_lbfp0(he_dev);
1340	he_init_rx_lbfp1(he_dev);
1341
1342	he_writel(he_dev, 0x0, RLBC_H);
1343	he_writel(he_dev, 0x0, RLBC_T);
1344	he_writel(he_dev, 0x0, RLBC_H2);
1345
1346	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1347	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1348
1349	he_init_tx_lbfp(he_dev);
1350
1351	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1352
1353	/* 5.1.5 initialize intermediate receive queues */
1354
1355	if (he_is622(he_dev)) {
1356		he_writel(he_dev, 0x000f, G0_INMQ_S);
1357		he_writel(he_dev, 0x200f, G0_INMQ_L);
1358
1359		he_writel(he_dev, 0x001f, G1_INMQ_S);
1360		he_writel(he_dev, 0x201f, G1_INMQ_L);
1361
1362		he_writel(he_dev, 0x002f, G2_INMQ_S);
1363		he_writel(he_dev, 0x202f, G2_INMQ_L);
1364
1365		he_writel(he_dev, 0x003f, G3_INMQ_S);
1366		he_writel(he_dev, 0x203f, G3_INMQ_L);
1367
1368		he_writel(he_dev, 0x004f, G4_INMQ_S);
1369		he_writel(he_dev, 0x204f, G4_INMQ_L);
1370
1371		he_writel(he_dev, 0x005f, G5_INMQ_S);
1372		he_writel(he_dev, 0x205f, G5_INMQ_L);
1373
1374		he_writel(he_dev, 0x006f, G6_INMQ_S);
1375		he_writel(he_dev, 0x206f, G6_INMQ_L);
1376
1377		he_writel(he_dev, 0x007f, G7_INMQ_S);
1378		he_writel(he_dev, 0x207f, G7_INMQ_L);
1379	} else {
1380		he_writel(he_dev, 0x0000, G0_INMQ_S);
1381		he_writel(he_dev, 0x0008, G0_INMQ_L);
1382
1383		he_writel(he_dev, 0x0001, G1_INMQ_S);
1384		he_writel(he_dev, 0x0009, G1_INMQ_L);
1385
1386		he_writel(he_dev, 0x0002, G2_INMQ_S);
1387		he_writel(he_dev, 0x000a, G2_INMQ_L);
1388
1389		he_writel(he_dev, 0x0003, G3_INMQ_S);
1390		he_writel(he_dev, 0x000b, G3_INMQ_L);
1391
1392		he_writel(he_dev, 0x0004, G4_INMQ_S);
1393		he_writel(he_dev, 0x000c, G4_INMQ_L);
1394
1395		he_writel(he_dev, 0x0005, G5_INMQ_S);
1396		he_writel(he_dev, 0x000d, G5_INMQ_L);
1397
1398		he_writel(he_dev, 0x0006, G6_INMQ_S);
1399		he_writel(he_dev, 0x000e, G6_INMQ_L);
1400
1401		he_writel(he_dev, 0x0007, G7_INMQ_S);
1402		he_writel(he_dev, 0x000f, G7_INMQ_L);
1403	}
1404
1405	/* 5.1.6 application tunable parameters */
1406
1407	he_writel(he_dev, 0x0, MCC);
1408	he_writel(he_dev, 0x0, OEC);
1409	he_writel(he_dev, 0x0, DCC);
1410	he_writel(he_dev, 0x0, CEC);
1411
1412	/* 5.1.7 cs block initialization */
1413
1414	he_init_cs_block(he_dev);
1415
1416	/* 5.1.8 cs block connection memory initialization */
1417
1418	if (he_init_cs_block_rcm(he_dev) < 0)
1419		return -ENOMEM;
1420
1421	/* 5.1.10 initialize host structures */
1422
1423	he_init_tpdrq(he_dev);
1424
1425	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1426					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1427	if (he_dev->tpd_pool == NULL) {
1428		hprintk("unable to create tpd dma_pool\n");
1429		return -ENOMEM;
1430	}
1431
1432	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1433
1434	if (he_init_group(he_dev, 0) != 0)
1435		return -ENOMEM;
1436
1437	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1438		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1439		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1440		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1441		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1442						G0_RBPS_BS + (group * 32));
1443
1444		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1445		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1446		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1447						G0_RBPL_QI + (group * 32));
1448		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1449
1450		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1451		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1452		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1453						G0_RBRQ_Q + (group * 16));
1454		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1455
1456		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1457		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1458		he_writel(he_dev, TBRQ_THRESH(0x1),
1459						G0_TBRQ_THRESH + (group * 16));
1460		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1461	}
1462
1463	/* host status page */
1464
1465	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
1466					  sizeof(struct he_hsp),
1467					  &he_dev->hsp_phys, GFP_KERNEL);
1468	if (he_dev->hsp == NULL) {
1469		hprintk("failed to allocate host status page\n");
1470		return -ENOMEM;
1471	}
1472	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1473
1474	/* initialize framer */
1475
1476#ifdef CONFIG_ATM_HE_USE_SUNI
1477	if (he_isMM(he_dev))
1478		suni_init(he_dev->atm_dev);
1479	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1480		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1481#endif /* CONFIG_ATM_HE_USE_SUNI */
1482
1483	if (sdh) {
1484		/* this really should be in suni.c but for now... */
1485		int val;
1486
1487		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1488		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1489		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1490		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1491	}
1492
1493	/* 5.1.12 enable transmit and receive */
1494
1495	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1496	reg |= TX_ENABLE|ER_ENABLE;
1497	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1498
1499	reg = he_readl(he_dev, RC_CONFIG);
1500	reg |= RX_ENABLE;
1501	he_writel(he_dev, reg, RC_CONFIG);
1502
1503	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1504		he_dev->cs_stper[i].inuse = 0;
1505		he_dev->cs_stper[i].pcr = -1;
1506	}
1507	he_dev->total_bw = 0;
1508
1509
1510	/* atm linux initialization */
1511
1512	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1513	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1514
1515	he_dev->irq_peak = 0;
1516	he_dev->rbrq_peak = 0;
1517	he_dev->rbpl_peak = 0;
1518	he_dev->tbrq_peak = 0;
1519
1520	HPRINTK("hell bent for leather!\n");
1521
1522	return 0;
1523}
1524
1525static void
1526he_stop(struct he_dev *he_dev)
1527{
1528	struct he_buff *heb, *next;
1529	struct pci_dev *pci_dev;
1530	u32 gen_cntl_0, reg;
1531	u16 command;
1532
1533	pci_dev = he_dev->pci_dev;
1534
1535	/* disable interrupts */
1536
1537	if (he_dev->membase) {
1538		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1539		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1540		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1541
1542		tasklet_disable(&he_dev->tasklet);
1543
1544		/* disable recv and transmit */
1545
1546		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547		reg &= ~(TX_ENABLE|ER_ENABLE);
1548		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1549
1550		reg = he_readl(he_dev, RC_CONFIG);
1551		reg &= ~(RX_ENABLE);
1552		he_writel(he_dev, reg, RC_CONFIG);
1553	}
1554
1555#ifdef CONFIG_ATM_HE_USE_SUNI
1556	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1557		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1558#endif /* CONFIG_ATM_HE_USE_SUNI */
1559
1560	if (he_dev->irq)
1561		free_irq(he_dev->irq, he_dev);
1562
1563	if (he_dev->irq_base)
1564		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1565				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1566
1567	if (he_dev->hsp)
1568		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1569				  he_dev->hsp, he_dev->hsp_phys);
1570
1571	if (he_dev->rbpl_base) {
1572		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1573			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1574
1575		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1576				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1577	}
1578
1579	kfree(he_dev->rbpl_virt);
1580	kfree(he_dev->rbpl_table);
1581
1582	if (he_dev->rbpl_pool)
1583		dma_pool_destroy(he_dev->rbpl_pool);
1584
1585	if (he_dev->rbrq_base)
1586		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1587				  he_dev->rbrq_base, he_dev->rbrq_phys);
1588
1589	if (he_dev->tbrq_base)
1590		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1591				  he_dev->tbrq_base, he_dev->tbrq_phys);
1592
1593	if (he_dev->tpdrq_base)
1594		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1595				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
1596
1597	if (he_dev->tpd_pool)
1598		dma_pool_destroy(he_dev->tpd_pool);
1599
1600	if (he_dev->pci_dev) {
1601		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1602		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1603		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1604	}
1605
1606	if (he_dev->membase)
1607		iounmap(he_dev->membase);
1608}
1609
1610static struct he_tpd *
1611__alloc_tpd(struct he_dev *he_dev)
1612{
1613	struct he_tpd *tpd;
1614	dma_addr_t mapping;
1615
1616	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1617	if (tpd == NULL)
1618		return NULL;
1619
1620	tpd->status = TPD_ADDR(mapping);
1621	tpd->reserved = 0;
1622	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1623	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1624	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1625
1626	return tpd;
1627}
1628
1629#define AAL5_LEN(buf,len) 						\
1630			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1631				(((unsigned char *)(buf))[(len)-5]))
1632
1633/* 2.10.1.2 receive
1634 *
1635 * aal5 packets can optionally return the tcp checksum in the lower
1636 * 16 bits of the crc (RSR0_TCP_CKSUM)
1637 */
1638
1639#define TCP_CKSUM(buf,len) 						\
1640			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1641				(((unsigned char *)(buf))[(len-1)]))
1642
1643static int
1644he_service_rbrq(struct he_dev *he_dev, int group)
1645{
1646	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1647				((unsigned long)he_dev->rbrq_base |
1648					he_dev->hsp->group[group].rbrq_tail);
1649	unsigned cid, lastcid = -1;
1650	struct sk_buff *skb;
1651	struct atm_vcc *vcc = NULL;
1652	struct he_vcc *he_vcc;
1653	struct he_buff *heb, *next;
1654	int i;
1655	int pdus_assembled = 0;
1656	int updated = 0;
1657
1658	read_lock(&vcc_sklist_lock);
1659	while (he_dev->rbrq_head != rbrq_tail) {
1660		++updated;
1661
1662		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1663			he_dev->rbrq_head, group,
1664			RBRQ_ADDR(he_dev->rbrq_head),
1665			RBRQ_BUFLEN(he_dev->rbrq_head),
1666			RBRQ_CID(he_dev->rbrq_head),
1667			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1668			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1669			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1670			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1671			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1672			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1673
1674		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1675		heb = he_dev->rbpl_virt[i];
1676
1677		cid = RBRQ_CID(he_dev->rbrq_head);
1678		if (cid != lastcid)
1679			vcc = __find_vcc(he_dev, cid);
1680		lastcid = cid;
1681
1682		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1683			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1684			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1685				clear_bit(i, he_dev->rbpl_table);
1686				list_del(&heb->entry);
1687				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1688			}
1689
1690			goto next_rbrq_entry;
1691		}
1692
1693		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1694			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1695				atomic_inc(&vcc->stats->rx_drop);
1696			goto return_host_buffers;
1697		}
1698
1699		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1700		clear_bit(i, he_dev->rbpl_table);
1701		list_move_tail(&heb->entry, &he_vcc->buffers);
1702		he_vcc->pdu_len += heb->len;
1703
1704		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1705			lastcid = -1;
1706			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1707			wake_up(&he_vcc->rx_waitq);
1708			goto return_host_buffers;
1709		}
1710
1711		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1712			goto next_rbrq_entry;
1713
1714		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1715				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1716			HPRINTK("%s%s (%d.%d)\n",
1717				RBRQ_CRC_ERR(he_dev->rbrq_head)
1718							? "CRC_ERR " : "",
1719				RBRQ_LEN_ERR(he_dev->rbrq_head)
1720							? "LEN_ERR" : "",
1721							vcc->vpi, vcc->vci);
1722			atomic_inc(&vcc->stats->rx_err);
1723			goto return_host_buffers;
1724		}
1725
1726		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1727							GFP_ATOMIC);
1728		if (!skb) {
1729			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1730			goto return_host_buffers;
1731		}
1732
1733		if (rx_skb_reserve > 0)
1734			skb_reserve(skb, rx_skb_reserve);
1735
1736		__net_timestamp(skb);
1737
1738		list_for_each_entry(heb, &he_vcc->buffers, entry)
1739			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1740
1741		switch (vcc->qos.aal) {
1742			case ATM_AAL0:
1743				/* 2.10.1.5 raw cell receive */
1744				skb->len = ATM_AAL0_SDU;
1745				skb_set_tail_pointer(skb, skb->len);
1746				break;
1747			case ATM_AAL5:
1748				/* 2.10.1.2 aal5 receive */
1749
1750				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1751				skb_set_tail_pointer(skb, skb->len);
1752#ifdef USE_CHECKSUM_HW
1753				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1754					skb->ip_summed = CHECKSUM_COMPLETE;
1755					skb->csum = TCP_CKSUM(skb->data,
1756							he_vcc->pdu_len);
1757				}
1758#endif
1759				break;
1760		}
1761
1762#ifdef should_never_happen
1763		if (skb->len > vcc->qos.rxtp.max_sdu)
1764			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1765#endif
1766
1767#ifdef notdef
1768		ATM_SKB(skb)->vcc = vcc;
1769#endif
1770		spin_unlock(&he_dev->global_lock);
1771		vcc->push(vcc, skb);
1772		spin_lock(&he_dev->global_lock);
1773
1774		atomic_inc(&vcc->stats->rx);
1775
1776return_host_buffers:
1777		++pdus_assembled;
1778
1779		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1780			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1781		INIT_LIST_HEAD(&he_vcc->buffers);
1782		he_vcc->pdu_len = 0;
1783
1784next_rbrq_entry:
1785		he_dev->rbrq_head = (struct he_rbrq *)
1786				((unsigned long) he_dev->rbrq_base |
1787					RBRQ_MASK(he_dev->rbrq_head + 1));
1788
1789	}
1790	read_unlock(&vcc_sklist_lock);
1791
1792	if (updated) {
1793		if (updated > he_dev->rbrq_peak)
1794			he_dev->rbrq_peak = updated;
1795
1796		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1797						G0_RBRQ_H + (group * 16));
1798	}
1799
1800	return pdus_assembled;
1801}
1802
1803static void
1804he_service_tbrq(struct he_dev *he_dev, int group)
1805{
1806	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1807				((unsigned long)he_dev->tbrq_base |
1808					he_dev->hsp->group[group].tbrq_tail);
1809	struct he_tpd *tpd;
1810	int slot, updated = 0;
1811	struct he_tpd *__tpd;
1812
1813	/* 2.1.6 transmit buffer return queue */
1814
1815	while (he_dev->tbrq_head != tbrq_tail) {
1816		++updated;
1817
1818		HPRINTK("tbrq%d 0x%x%s%s\n",
1819			group,
1820			TBRQ_TPD(he_dev->tbrq_head),
1821			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1822			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1823		tpd = NULL;
1824		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1825			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1826				tpd = __tpd;
1827				list_del(&__tpd->entry);
1828				break;
1829			}
1830		}
1831
1832		if (tpd == NULL) {
1833			hprintk("unable to locate tpd for dma buffer %x\n",
1834						TBRQ_TPD(he_dev->tbrq_head));
1835			goto next_tbrq_entry;
1836		}
1837
1838		if (TBRQ_EOS(he_dev->tbrq_head)) {
1839			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1840				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1841			if (tpd->vcc)
1842				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1843
1844			goto next_tbrq_entry;
1845		}
1846
1847		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1848			if (tpd->iovec[slot].addr)
1849				dma_unmap_single(&he_dev->pci_dev->dev,
1850					tpd->iovec[slot].addr,
1851					tpd->iovec[slot].len & TPD_LEN_MASK,
1852							DMA_TO_DEVICE);
1853			if (tpd->iovec[slot].len & TPD_LST)
1854				break;
1855
1856		}
1857
1858		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1859			if (tpd->vcc && tpd->vcc->pop)
1860				tpd->vcc->pop(tpd->vcc, tpd->skb);
1861			else
1862				dev_kfree_skb_any(tpd->skb);
1863		}
1864
1865next_tbrq_entry:
1866		if (tpd)
1867			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1868		he_dev->tbrq_head = (struct he_tbrq *)
1869				((unsigned long) he_dev->tbrq_base |
1870					TBRQ_MASK(he_dev->tbrq_head + 1));
1871	}
1872
1873	if (updated) {
1874		if (updated > he_dev->tbrq_peak)
1875			he_dev->tbrq_peak = updated;
1876
1877		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1878						G0_TBRQ_H + (group * 16));
1879	}
1880}
1881
1882static void
1883he_service_rbpl(struct he_dev *he_dev, int group)
1884{
1885	struct he_rbp *new_tail;
1886	struct he_rbp *rbpl_head;
1887	struct he_buff *heb;
1888	dma_addr_t mapping;
1889	int i;
1890	int moved = 0;
1891
1892	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1893					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1894
1895	for (;;) {
1896		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1897						RBPL_MASK(he_dev->rbpl_tail+1));
1898
1899		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1900		if (new_tail == rbpl_head)
1901			break;
1902
1903		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1904		if (i > (RBPL_TABLE_SIZE - 1)) {
1905			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1906			if (i > (RBPL_TABLE_SIZE - 1))
1907				break;
1908		}
1909		he_dev->rbpl_hint = i + 1;
1910
1911		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1912		if (!heb)
1913			break;
1914		heb->mapping = mapping;
1915		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1916		he_dev->rbpl_virt[i] = heb;
1917		set_bit(i, he_dev->rbpl_table);
1918		new_tail->idx = i << RBP_IDX_OFFSET;
1919		new_tail->phys = mapping + offsetof(struct he_buff, data);
1920
1921		he_dev->rbpl_tail = new_tail;
1922		++moved;
1923	}
1924
1925	if (moved)
1926		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1927}
1928
1929static void
1930he_tasklet(unsigned long data)
1931{
1932	unsigned long flags;
1933	struct he_dev *he_dev = (struct he_dev *) data;
1934	int group, type;
1935	int updated = 0;
1936
1937	HPRINTK("tasklet (0x%lx)\n", data);
1938	spin_lock_irqsave(&he_dev->global_lock, flags);
1939
1940	while (he_dev->irq_head != he_dev->irq_tail) {
1941		++updated;
1942
1943		type = ITYPE_TYPE(he_dev->irq_head->isw);
1944		group = ITYPE_GROUP(he_dev->irq_head->isw);
1945
1946		switch (type) {
1947			case ITYPE_RBRQ_THRESH:
1948				HPRINTK("rbrq%d threshold\n", group);
1949				/* fall through */
1950			case ITYPE_RBRQ_TIMER:
1951				if (he_service_rbrq(he_dev, group))
1952					he_service_rbpl(he_dev, group);
1953				break;
1954			case ITYPE_TBRQ_THRESH:
1955				HPRINTK("tbrq%d threshold\n", group);
1956				/* fall through */
1957			case ITYPE_TPD_COMPLETE:
1958				he_service_tbrq(he_dev, group);
1959				break;
1960			case ITYPE_RBPL_THRESH:
1961				he_service_rbpl(he_dev, group);
1962				break;
1963			case ITYPE_RBPS_THRESH:
1964				/* shouldn't happen unless small buffers enabled */
1965				break;
1966			case ITYPE_PHY:
1967				HPRINTK("phy interrupt\n");
1968#ifdef CONFIG_ATM_HE_USE_SUNI
1969				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1970				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1971					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1972				spin_lock_irqsave(&he_dev->global_lock, flags);
1973#endif
1974				break;
1975			case ITYPE_OTHER:
1976				switch (type|group) {
1977					case ITYPE_PARITY:
1978						hprintk("parity error\n");
1979						break;
1980					case ITYPE_ABORT:
1981						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1982						break;
1983				}
1984				break;
1985			case ITYPE_TYPE(ITYPE_INVALID):
1986				/* see 8.1.1 -- check all queues */
1987
1988				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1989
1990				he_service_rbrq(he_dev, 0);
1991				he_service_rbpl(he_dev, 0);
1992				he_service_tbrq(he_dev, 0);
1993				break;
1994			default:
1995				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1996		}
1997
1998		he_dev->irq_head->isw = ITYPE_INVALID;
1999
2000		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2001	}
2002
2003	if (updated) {
2004		if (updated > he_dev->irq_peak)
2005			he_dev->irq_peak = updated;
2006
2007		he_writel(he_dev,
2008			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2009			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2010			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2011		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2012	}
2013	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2014}
2015
2016static irqreturn_t
2017he_irq_handler(int irq, void *dev_id)
2018{
2019	unsigned long flags;
2020	struct he_dev *he_dev = (struct he_dev * )dev_id;
2021	int handled = 0;
2022
2023	if (he_dev == NULL)
2024		return IRQ_NONE;
2025
2026	spin_lock_irqsave(&he_dev->global_lock, flags);
2027
2028	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2029						(*he_dev->irq_tailoffset << 2));
2030
2031	if (he_dev->irq_tail == he_dev->irq_head) {
2032		HPRINTK("tailoffset not updated?\n");
2033		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2034			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2035		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2036	}
2037
2038#ifdef DEBUG
2039	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2040		hprintk("spurious (or shared) interrupt?\n");
2041#endif
2042
2043	if (he_dev->irq_head != he_dev->irq_tail) {
2044		handled = 1;
2045		tasklet_schedule(&he_dev->tasklet);
2046		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2047		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2048	}
2049	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2050	return IRQ_RETVAL(handled);
2051
2052}
2053
2054static __inline__ void
2055__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2056{
2057	struct he_tpdrq *new_tail;
2058
2059	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2060					tpd, cid, he_dev->tpdrq_tail);
2061
2062	/* new_tail = he_dev->tpdrq_tail; */
2063	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2064					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2065
2066	/*
2067	 * check to see if we are about to set the tail == head
2068	 * if true, update the head pointer from the adapter
2069	 * to see if this is really the case (reading the queue
2070	 * head for every enqueue would be unnecessarily slow)
2071	 */
2072
2073	if (new_tail == he_dev->tpdrq_head) {
2074		he_dev->tpdrq_head = (struct he_tpdrq *)
2075			(((unsigned long)he_dev->tpdrq_base) |
2076				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2077
2078		if (new_tail == he_dev->tpdrq_head) {
2079			int slot;
2080
2081			hprintk("tpdrq full (cid 0x%x)\n", cid);
2082			/*
2083			 * FIXME
2084			 * push tpd onto a transmit backlog queue
2085			 * after service_tbrq, service the backlog
2086			 * for now, we just drop the pdu
2087			 */
2088			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2089				if (tpd->iovec[slot].addr)
2090					dma_unmap_single(&he_dev->pci_dev->dev,
2091						tpd->iovec[slot].addr,
2092						tpd->iovec[slot].len & TPD_LEN_MASK,
2093								DMA_TO_DEVICE);
2094			}
2095			if (tpd->skb) {
2096				if (tpd->vcc->pop)
2097					tpd->vcc->pop(tpd->vcc, tpd->skb);
2098				else
2099					dev_kfree_skb_any(tpd->skb);
2100				atomic_inc(&tpd->vcc->stats->tx_err);
2101			}
2102			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2103			return;
2104		}
2105	}
2106
2107	/* 2.1.5 transmit packet descriptor ready queue */
2108	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2109	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2110	he_dev->tpdrq_tail->cid = cid;
2111	wmb();
2112
2113	he_dev->tpdrq_tail = new_tail;
2114
2115	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2116	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2117}
2118
2119static int
2120he_open(struct atm_vcc *vcc)
2121{
2122	unsigned long flags;
2123	struct he_dev *he_dev = HE_DEV(vcc->dev);
2124	struct he_vcc *he_vcc;
2125	int err = 0;
2126	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2127	short vpi = vcc->vpi;
2128	int vci = vcc->vci;
2129
2130	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2131		return 0;
2132
2133	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2134
2135	set_bit(ATM_VF_ADDR, &vcc->flags);
2136
2137	cid = he_mkcid(he_dev, vpi, vci);
2138
2139	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2140	if (he_vcc == NULL) {
2141		hprintk("unable to allocate he_vcc during open\n");
2142		return -ENOMEM;
2143	}
2144
2145	INIT_LIST_HEAD(&he_vcc->buffers);
2146	he_vcc->pdu_len = 0;
2147	he_vcc->rc_index = -1;
2148
2149	init_waitqueue_head(&he_vcc->rx_waitq);
2150	init_waitqueue_head(&he_vcc->tx_waitq);
2151
2152	vcc->dev_data = he_vcc;
2153
2154	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2155		int pcr_goal;
2156
2157		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2158		if (pcr_goal == 0)
2159			pcr_goal = he_dev->atm_dev->link_rate;
2160		if (pcr_goal < 0)	/* means round down, technically */
2161			pcr_goal = -pcr_goal;
2162
2163		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2164
2165		switch (vcc->qos.aal) {
2166			case ATM_AAL5:
2167				tsr0_aal = TSR0_AAL5;
2168				tsr4 = TSR4_AAL5;
2169				break;
2170			case ATM_AAL0:
2171				tsr0_aal = TSR0_AAL0_SDU;
2172				tsr4 = TSR4_AAL0_SDU;
2173				break;
2174			default:
2175				err = -EINVAL;
2176				goto open_failed;
2177		}
2178
2179		spin_lock_irqsave(&he_dev->global_lock, flags);
2180		tsr0 = he_readl_tsr0(he_dev, cid);
2181		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2182
2183		if (TSR0_CONN_STATE(tsr0) != 0) {
2184			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2185			err = -EBUSY;
2186			goto open_failed;
2187		}
2188
2189		switch (vcc->qos.txtp.traffic_class) {
2190			case ATM_UBR:
2191				/* 2.3.3.1 open connection ubr */
2192
2193				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2194					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2195				break;
2196
2197			case ATM_CBR:
2198				/* 2.3.3.2 open connection cbr */
2199
2200				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2201				if ((he_dev->total_bw + pcr_goal)
2202					> (he_dev->atm_dev->link_rate * 9 / 10))
2203				{
2204					err = -EBUSY;
2205					goto open_failed;
2206				}
2207
2208				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2209
2210				/* find an unused cs_stper register */
2211				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2212					if (he_dev->cs_stper[reg].inuse == 0 ||
2213					    he_dev->cs_stper[reg].pcr == pcr_goal)
2214							break;
2215
2216				if (reg == HE_NUM_CS_STPER) {
2217					err = -EBUSY;
2218					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2219					goto open_failed;
2220				}
2221
2222				he_dev->total_bw += pcr_goal;
2223
2224				he_vcc->rc_index = reg;
2225				++he_dev->cs_stper[reg].inuse;
2226				he_dev->cs_stper[reg].pcr = pcr_goal;
2227
2228				clock = he_is622(he_dev) ? 66667000 : 50000000;
2229				period = clock / pcr_goal;
2230
2231				HPRINTK("rc_index = %d period = %d\n",
2232								reg, period);
2233
2234				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2235							CS_STPER0 + reg);
2236				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2237
2238				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2239							TSR0_RC_INDEX(reg);
2240
2241				break;
2242			default:
2243				err = -EINVAL;
2244				goto open_failed;
2245		}
2246
2247		spin_lock_irqsave(&he_dev->global_lock, flags);
2248
2249		he_writel_tsr0(he_dev, tsr0, cid);
2250		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2251		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2252					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2253		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2254		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2255
2256		he_writel_tsr3(he_dev, 0x0, cid);
2257		he_writel_tsr5(he_dev, 0x0, cid);
2258		he_writel_tsr6(he_dev, 0x0, cid);
2259		he_writel_tsr7(he_dev, 0x0, cid);
2260		he_writel_tsr8(he_dev, 0x0, cid);
2261		he_writel_tsr10(he_dev, 0x0, cid);
2262		he_writel_tsr11(he_dev, 0x0, cid);
2263		he_writel_tsr12(he_dev, 0x0, cid);
2264		he_writel_tsr13(he_dev, 0x0, cid);
2265		he_writel_tsr14(he_dev, 0x0, cid);
2266		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2267		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2268	}
2269
2270	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2271		unsigned aal;
2272
2273		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2274		 				&HE_VCC(vcc)->rx_waitq);
2275
2276		switch (vcc->qos.aal) {
2277			case ATM_AAL5:
2278				aal = RSR0_AAL5;
2279				break;
2280			case ATM_AAL0:
2281				aal = RSR0_RAWCELL;
2282				break;
2283			default:
2284				err = -EINVAL;
2285				goto open_failed;
2286		}
2287
2288		spin_lock_irqsave(&he_dev->global_lock, flags);
2289
2290		rsr0 = he_readl_rsr0(he_dev, cid);
2291		if (rsr0 & RSR0_OPEN_CONN) {
2292			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2293
2294			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2295			err = -EBUSY;
2296			goto open_failed;
2297		}
2298
2299		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2300		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2301		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2302				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2303
2304#ifdef USE_CHECKSUM_HW
2305		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2306			rsr0 |= RSR0_TCP_CKSUM;
2307#endif
2308
2309		he_writel_rsr4(he_dev, rsr4, cid);
2310		he_writel_rsr1(he_dev, rsr1, cid);
2311		/* 5.1.11 last parameter initialized should be
2312			  the open/closed indication in rsr0 */
2313		he_writel_rsr0(he_dev,
2314			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2315		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2316
2317		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2318	}
2319
2320open_failed:
2321
2322	if (err) {
2323		kfree(he_vcc);
2324		clear_bit(ATM_VF_ADDR, &vcc->flags);
2325	}
2326	else
2327		set_bit(ATM_VF_READY, &vcc->flags);
2328
2329	return err;
2330}
2331
2332static void
2333he_close(struct atm_vcc *vcc)
2334{
2335	unsigned long flags;
2336	DECLARE_WAITQUEUE(wait, current);
2337	struct he_dev *he_dev = HE_DEV(vcc->dev);
2338	struct he_tpd *tpd;
2339	unsigned cid;
2340	struct he_vcc *he_vcc = HE_VCC(vcc);
2341#define MAX_RETRY 30
2342	int retry = 0, sleep = 1, tx_inuse;
2343
2344	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2345
2346	clear_bit(ATM_VF_READY, &vcc->flags);
2347	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2348
2349	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2350		int timeout;
2351
2352		HPRINTK("close rx cid 0x%x\n", cid);
2353
2354		/* 2.7.2.2 close receive operation */
2355
2356		/* wait for previous close (if any) to finish */
2357
2358		spin_lock_irqsave(&he_dev->global_lock, flags);
2359		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2360			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2361			udelay(250);
2362		}
2363
2364		set_current_state(TASK_UNINTERRUPTIBLE);
2365		add_wait_queue(&he_vcc->rx_waitq, &wait);
2366
2367		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2368		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2369		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2370		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2371
2372		timeout = schedule_timeout(30*HZ);
2373
2374		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2375		set_current_state(TASK_RUNNING);
2376
2377		if (timeout == 0)
2378			hprintk("close rx timeout cid 0x%x\n", cid);
2379
2380		HPRINTK("close rx cid 0x%x complete\n", cid);
2381
2382	}
2383
2384	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2385		volatile unsigned tsr4, tsr0;
2386		int timeout;
2387
2388		HPRINTK("close tx cid 0x%x\n", cid);
2389
2390		/* 2.1.2
2391		 *
2392		 * ... the host must first stop queueing packets to the TPDRQ
2393		 * on the connection to be closed, then wait for all outstanding
2394		 * packets to be transmitted and their buffers returned to the
2395		 * TBRQ. When the last packet on the connection arrives in the
2396		 * TBRQ, the host issues the close command to the adapter.
2397		 */
2398
2399		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2400		       (retry < MAX_RETRY)) {
2401			msleep(sleep);
2402			if (sleep < 250)
2403				sleep = sleep * 2;
2404
2405			++retry;
2406		}
2407
2408		if (tx_inuse > 1)
2409			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2410
2411		/* 2.3.1.1 generic close operations with flush */
2412
2413		spin_lock_irqsave(&he_dev->global_lock, flags);
2414		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2415					/* also clears TSR4_SESSION_ENDED */
2416
2417		switch (vcc->qos.txtp.traffic_class) {
2418			case ATM_UBR:
2419				he_writel_tsr1(he_dev,
2420					TSR1_MCR(rate_to_atmf(200000))
2421					| TSR1_PCR(0), cid);
2422				break;
2423			case ATM_CBR:
2424				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2425				break;
2426		}
2427		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2428
2429		tpd = __alloc_tpd(he_dev);
2430		if (tpd == NULL) {
2431			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2432			goto close_tx_incomplete;
2433		}
2434		tpd->status |= TPD_EOS | TPD_INT;
2435		tpd->skb = NULL;
2436		tpd->vcc = vcc;
2437		wmb();
2438
2439		set_current_state(TASK_UNINTERRUPTIBLE);
2440		add_wait_queue(&he_vcc->tx_waitq, &wait);
2441		__enqueue_tpd(he_dev, tpd, cid);
2442		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2443
2444		timeout = schedule_timeout(30*HZ);
2445
2446		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2447		set_current_state(TASK_RUNNING);
2448
2449		spin_lock_irqsave(&he_dev->global_lock, flags);
2450
2451		if (timeout == 0) {
2452			hprintk("close tx timeout cid 0x%x\n", cid);
2453			goto close_tx_incomplete;
2454		}
2455
2456		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2457			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2458			udelay(250);
2459		}
2460
2461		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2462			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2463			udelay(250);
2464		}
2465
2466close_tx_incomplete:
2467
2468		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2469			int reg = he_vcc->rc_index;
2470
2471			HPRINTK("cs_stper reg = %d\n", reg);
2472
2473			if (he_dev->cs_stper[reg].inuse == 0)
2474				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2475			else
2476				--he_dev->cs_stper[reg].inuse;
2477
2478			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2479		}
2480		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2481
2482		HPRINTK("close tx cid 0x%x complete\n", cid);
2483	}
2484
2485	kfree(he_vcc);
2486
2487	clear_bit(ATM_VF_ADDR, &vcc->flags);
2488}
2489
2490static int
2491he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2492{
2493	unsigned long flags;
2494	struct he_dev *he_dev = HE_DEV(vcc->dev);
2495	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2496	struct he_tpd *tpd;
2497#ifdef USE_SCATTERGATHER
2498	int i, slot = 0;
2499#endif
2500
2501#define HE_TPD_BUFSIZE 0xffff
2502
2503	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2504
2505	if ((skb->len > HE_TPD_BUFSIZE) ||
2506	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2507		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2508		if (vcc->pop)
2509			vcc->pop(vcc, skb);
2510		else
2511			dev_kfree_skb_any(skb);
2512		atomic_inc(&vcc->stats->tx_err);
2513		return -EINVAL;
2514	}
2515
2516#ifndef USE_SCATTERGATHER
2517	if (skb_shinfo(skb)->nr_frags) {
2518		hprintk("no scatter/gather support\n");
2519		if (vcc->pop)
2520			vcc->pop(vcc, skb);
2521		else
2522			dev_kfree_skb_any(skb);
2523		atomic_inc(&vcc->stats->tx_err);
2524		return -EINVAL;
2525	}
2526#endif
2527	spin_lock_irqsave(&he_dev->global_lock, flags);
2528
2529	tpd = __alloc_tpd(he_dev);
2530	if (tpd == NULL) {
2531		if (vcc->pop)
2532			vcc->pop(vcc, skb);
2533		else
2534			dev_kfree_skb_any(skb);
2535		atomic_inc(&vcc->stats->tx_err);
2536		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2537		return -ENOMEM;
2538	}
2539
2540	if (vcc->qos.aal == ATM_AAL5)
2541		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2542	else {
2543		char *pti_clp = (void *) (skb->data + 3);
2544		int clp, pti;
2545
2546		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2547		clp = (*pti_clp & ATM_HDR_CLP);
2548		tpd->status |= TPD_CELLTYPE(pti);
2549		if (clp)
2550			tpd->status |= TPD_CLP;
2551
2552		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2553	}
2554
2555#ifdef USE_SCATTERGATHER
2556	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2557				skb_headlen(skb), DMA_TO_DEVICE);
2558	tpd->iovec[slot].len = skb_headlen(skb);
2559	++slot;
2560
2561	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2562		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2563
2564		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2565			tpd->vcc = vcc;
2566			tpd->skb = NULL;	/* not the last fragment
2567						   so dont ->push() yet */
2568			wmb();
2569
2570			__enqueue_tpd(he_dev, tpd, cid);
2571			tpd = __alloc_tpd(he_dev);
2572			if (tpd == NULL) {
2573				if (vcc->pop)
2574					vcc->pop(vcc, skb);
2575				else
2576					dev_kfree_skb_any(skb);
2577				atomic_inc(&vcc->stats->tx_err);
2578				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2579				return -ENOMEM;
2580			}
2581			tpd->status |= TPD_USERCELL;
2582			slot = 0;
2583		}
2584
2585		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2586			(void *) page_address(frag->page) + frag->page_offset,
2587				frag->size, DMA_TO_DEVICE);
2588		tpd->iovec[slot].len = frag->size;
2589		++slot;
2590
2591	}
2592
2593	tpd->iovec[slot - 1].len |= TPD_LST;
2594#else
2595	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2596	tpd->length0 = skb->len | TPD_LST;
2597#endif
2598	tpd->status |= TPD_INT;
2599
2600	tpd->vcc = vcc;
2601	tpd->skb = skb;
2602	wmb();
2603	ATM_SKB(skb)->vcc = vcc;
2604
2605	__enqueue_tpd(he_dev, tpd, cid);
2606	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2607
2608	atomic_inc(&vcc->stats->tx);
2609
2610	return 0;
2611}
2612
2613static int
2614he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2615{
2616	unsigned long flags;
2617	struct he_dev *he_dev = HE_DEV(atm_dev);
2618	struct he_ioctl_reg reg;
2619	int err = 0;
2620
2621	switch (cmd) {
2622		case HE_GET_REG:
2623			if (!capable(CAP_NET_ADMIN))
2624				return -EPERM;
2625
2626			if (copy_from_user(&reg, arg,
2627					   sizeof(struct he_ioctl_reg)))
2628				return -EFAULT;
2629
2630			spin_lock_irqsave(&he_dev->global_lock, flags);
2631			switch (reg.type) {
2632				case HE_REGTYPE_PCI:
2633					if (reg.addr >= HE_REGMAP_SIZE) {
2634						err = -EINVAL;
2635						break;
2636					}
2637
2638					reg.val = he_readl(he_dev, reg.addr);
2639					break;
2640				case HE_REGTYPE_RCM:
2641					reg.val =
2642						he_readl_rcm(he_dev, reg.addr);
2643					break;
2644				case HE_REGTYPE_TCM:
2645					reg.val =
2646						he_readl_tcm(he_dev, reg.addr);
2647					break;
2648				case HE_REGTYPE_MBOX:
2649					reg.val =
2650						he_readl_mbox(he_dev, reg.addr);
2651					break;
2652				default:
2653					err = -EINVAL;
2654					break;
2655			}
2656			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2657			if (err == 0)
2658				if (copy_to_user(arg, &reg,
2659							sizeof(struct he_ioctl_reg)))
2660					return -EFAULT;
2661			break;
2662		default:
2663#ifdef CONFIG_ATM_HE_USE_SUNI
2664			if (atm_dev->phy && atm_dev->phy->ioctl)
2665				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2666#else /* CONFIG_ATM_HE_USE_SUNI */
2667			err = -EINVAL;
2668#endif /* CONFIG_ATM_HE_USE_SUNI */
2669			break;
2670	}
2671
2672	return err;
2673}
2674
2675static void
2676he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2677{
2678	unsigned long flags;
2679	struct he_dev *he_dev = HE_DEV(atm_dev);
2680
2681	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2682
2683	spin_lock_irqsave(&he_dev->global_lock, flags);
2684	he_writel(he_dev, val, FRAMER + (addr*4));
2685	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2686	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2687}
2688
2689
2690static unsigned char
2691he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2692{
2693	unsigned long flags;
2694	struct he_dev *he_dev = HE_DEV(atm_dev);
2695	unsigned reg;
2696
2697	spin_lock_irqsave(&he_dev->global_lock, flags);
2698	reg = he_readl(he_dev, FRAMER + (addr*4));
2699	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2700
2701	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2702	return reg;
2703}
2704
2705static int
2706he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2707{
2708	unsigned long flags;
2709	struct he_dev *he_dev = HE_DEV(dev);
2710	int left, i;
2711#ifdef notdef
2712	struct he_rbrq *rbrq_tail;
2713	struct he_tpdrq *tpdrq_head;
2714	int rbpl_head, rbpl_tail;
2715#endif
2716	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2717
2718
2719	left = *pos;
2720	if (!left--)
2721		return sprintf(page, "ATM he driver\n");
2722
2723	if (!left--)
2724		return sprintf(page, "%s%s\n\n",
2725			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2726
2727	if (!left--)
2728		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2729
2730	spin_lock_irqsave(&he_dev->global_lock, flags);
2731	mcc += he_readl(he_dev, MCC);
2732	oec += he_readl(he_dev, OEC);
2733	dcc += he_readl(he_dev, DCC);
2734	cec += he_readl(he_dev, CEC);
2735	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2736
2737	if (!left--)
2738		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2739							mcc, oec, dcc, cec);
2740
2741	if (!left--)
2742		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2743				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2744
2745	if (!left--)
2746		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2747						CONFIG_TPDRQ_SIZE);
2748
2749	if (!left--)
2750		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2751				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2752
2753	if (!left--)
2754		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2755					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2756
2757
2758#ifdef notdef
2759	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2760	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2761
2762	inuse = rbpl_head - rbpl_tail;
2763	if (inuse < 0)
2764		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2765	inuse /= sizeof(struct he_rbp);
2766
2767	if (!left--)
2768		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2769						CONFIG_RBPL_SIZE, inuse);
2770#endif
2771
2772	if (!left--)
2773		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2774
2775	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2776		if (!left--)
2777			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2778						he_dev->cs_stper[i].pcr,
2779						he_dev->cs_stper[i].inuse);
2780
2781	if (!left--)
2782		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2783			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2784
2785	return 0;
2786}
2787
2788/* eeprom routines  -- see 4.7 */
2789
2790static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2791{
2792	u32 val = 0, tmp_read = 0;
2793	int i, j = 0;
2794	u8 byte_read = 0;
2795
2796	val = readl(he_dev->membase + HOST_CNTL);
2797	val &= 0xFFFFE0FF;
2798
2799	/* Turn on write enable */
2800	val |= 0x800;
2801	he_writel(he_dev, val, HOST_CNTL);
2802
2803	/* Send READ instruction */
2804	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2805		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2806		udelay(EEPROM_DELAY);
2807	}
2808
2809	/* Next, we need to send the byte address to read from */
2810	for (i = 7; i >= 0; i--) {
2811		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2812		udelay(EEPROM_DELAY);
2813		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2814		udelay(EEPROM_DELAY);
2815	}
2816
2817	j = 0;
2818
2819	val &= 0xFFFFF7FF;      /* Turn off write enable */
2820	he_writel(he_dev, val, HOST_CNTL);
2821
2822	/* Now, we can read data from the EEPROM by clocking it in */
2823	for (i = 7; i >= 0; i--) {
2824		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2825		udelay(EEPROM_DELAY);
2826		tmp_read = he_readl(he_dev, HOST_CNTL);
2827		byte_read |= (unsigned char)
2828			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2829		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2830		udelay(EEPROM_DELAY);
2831	}
2832
2833	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2834	udelay(EEPROM_DELAY);
2835
2836	return byte_read;
2837}
2838
2839MODULE_LICENSE("GPL");
2840MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2841MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2842module_param(disable64, bool, 0);
2843MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2844module_param(nvpibits, short, 0);
2845MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
2846module_param(nvcibits, short, 0);
2847MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
2848module_param(rx_skb_reserve, short, 0);
2849MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2850module_param(irq_coalesce, bool, 0);
2851MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2852module_param(sdh, bool, 0);
2853MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2854
2855static struct pci_device_id he_pci_tbl[] = {
2856	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2857	{ 0, }
2858};
2859
2860MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2861
2862static struct pci_driver he_driver = {
2863	.name =		"he",
2864	.probe =	he_init_one,
2865	.remove =	he_remove_one,
2866	.id_table =	he_pci_tbl,
2867};
2868
2869module_pci_driver(he_driver);
2870