1/*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
20 */
21
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/pci.h>
35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/timer.h>
40#include <linux/usb.h>
41#include <linux/usb/ch9.h>
42#include <linux/usb/gadget.h>
43
44#include <asm/byteorder.h>
45#include <asm/unaligned.h>
46
47#include "net2272.h"
48
49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
50
51static const char driver_name[] = "net2272";
52static const char driver_vers[] = "2006 October 17/mainline";
53static const char driver_desc[] = DRIVER_DESC;
54
55static const char ep0name[] = "ep0";
56static const char * const ep_name[] = {
57	ep0name,
58	"ep-a", "ep-b", "ep-c",
59};
60
61#ifdef CONFIG_USB_NET2272_DMA
62/*
63 * use_dma: the NET2272 can use an external DMA controller.
64 * Note that since there is no generic DMA api, some functions,
65 * notably request_dma, start_dma, and cancel_dma will need to be
66 * modified for your platform's particular dma controller.
67 *
68 * If use_dma is disabled, pio will be used instead.
69 */
70static bool use_dma = 0;
71module_param(use_dma, bool, 0644);
72
73/*
74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
75 * The NET2272 can only use dma for a single endpoint at a time.
76 * At some point this could be modified to allow either endpoint
77 * to take control of dma as it becomes available.
78 *
79 * Note that DMA should not be used on OUT endpoints unless it can
80 * be guaranteed that no short packets will arrive on an IN endpoint
81 * while the DMA operation is pending.  Otherwise the OUT DMA will
82 * terminate prematurely (See NET2272 Errata 630-0213-0101)
83 */
84static ushort dma_ep = 1;
85module_param(dma_ep, ushort, 0644);
86
87/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
89 *	mode 0 == Slow DREQ mode
90 *	mode 1 == Fast DREQ mode
91 *	mode 2 == Burst mode
92 */
93static ushort dma_mode = 2;
94module_param(dma_mode, ushort, 0644);
95#else
96#define use_dma 0
97#define dma_ep 1
98#define dma_mode 2
99#endif
100
101/*
102 * fifo_mode: net2272 buffer configuration:
103 *      mode 0 == ep-{a,b,c} 512db each
104 *      mode 1 == ep-a 1k, ep-{b,c} 512db
105 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
106 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
107 */
108static ushort fifo_mode = 0;
109module_param(fifo_mode, ushort, 0644);
110
111/*
112 * enable_suspend: When enabled, the driver will respond to
113 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus-powered devices set this to 1.
116 */
117static ushort enable_suspend = 0;
118module_param(enable_suspend, ushort, 0644);
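/*
 * Example module load (illustrative only; the exact module name depends on
 * the build, and the dma_* parameters exist only when the driver is built
 * with CONFIG_USB_NET2272_DMA):
 *   modprobe net2272 fifo_mode=2 enable_suspend=1
 */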
119
120static void assert_out_naking(struct net2272_ep *ep, const char *where)
121{
122	u8 tmp;
123
124#ifndef DEBUG
125	return;
126#endif
127
128	tmp = net2272_ep_read(ep, EP_STAT0);
129	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
130		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
131			ep->ep.name, where, tmp);
132		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
133	}
134}
135#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
136
137static void stop_out_naking(struct net2272_ep *ep)
138{
139	u8 tmp = net2272_ep_read(ep, EP_STAT0);
140
141	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
142		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
143}
144
145#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
146
147static char *type_string(u8 bmAttributes)
148{
149	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
150	case USB_ENDPOINT_XFER_BULK: return "bulk";
151	case USB_ENDPOINT_XFER_ISOC: return "iso";
152	case USB_ENDPOINT_XFER_INT:  return "intr";
153	default:                     return "control";
154	}
155}
156
157static char *buf_state_string(unsigned state)
158{
159	switch (state) {
160	case BUFF_FREE:  return "free";
161	case BUFF_VALID: return "valid";
162	case BUFF_LCL:   return "local";
163	case BUFF_USB:   return "usb";
164	default:         return "unknown";
165	}
166}
167
168static char *dma_mode_string(void)
169{
170	if (!use_dma)
171		return "PIO";
172	switch (dma_mode) {
173	case 0:  return "SLOW DREQ";
174	case 1:  return "FAST DREQ";
175	case 2:  return "BURST";
176	default: return "invalid";
177	}
178}
179
180static void net2272_dequeue_all(struct net2272_ep *);
181static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
182static int net2272_fifo_status(struct usb_ep *);
183
184static struct usb_ep_ops net2272_ep_ops;
185
186/*---------------------------------------------------------------------------*/
187
188static int
189net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
190{
191	struct net2272 *dev;
192	struct net2272_ep *ep;
193	u32 max;
194	u8 tmp;
195	unsigned long flags;
196
197	ep = container_of(_ep, struct net2272_ep, ep);
198	if (!_ep || !desc || ep->desc || _ep->name == ep0name
199			|| desc->bDescriptorType != USB_DT_ENDPOINT)
200		return -EINVAL;
201	dev = ep->dev;
202	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
203		return -ESHUTDOWN;
204
205	max = usb_endpoint_maxp(desc) & 0x1fff;
206
207	spin_lock_irqsave(&dev->lock, flags);
208	_ep->maxpacket = max & 0x7fff;
209	ep->desc = desc;
210
211	/* net2272_ep_reset() has already been called */
212	ep->stopped = 0;
213	ep->wedged = 0;
214
215	/* set speed-dependent max packet */
216	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
217	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
218
219	/* set type, direction, address; reset fifo counters */
220	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
221	tmp = usb_endpoint_type(desc);
222	if (usb_endpoint_xfer_bulk(desc)) {
223		/* catch some particularly blatant driver bugs */
224		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
225		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
226			spin_unlock_irqrestore(&dev->lock, flags);
227			return -ERANGE;
228		}
229	}
230	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
231	tmp <<= ENDPOINT_TYPE;
232	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
233	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
234	tmp |= (1 << ENDPOINT_ENABLE);
235
236	/* for OUT transfers, block the rx fifo until a read is posted */
237	ep->is_in = usb_endpoint_dir_in(desc);
238	if (!ep->is_in)
239		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
240
241	net2272_ep_write(ep, EP_CFG, tmp);
242
243	/* enable irqs */
244	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
245	net2272_write(dev, IRQENB0, tmp);
246
247	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
248		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
249		| net2272_ep_read(ep, EP_IRQENB);
250	net2272_ep_write(ep, EP_IRQENB, tmp);
251
252	tmp = desc->bEndpointAddress;
253	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
254		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
255		type_string(desc->bmAttributes), max,
256		net2272_ep_read(ep, EP_CFG));
257
258	spin_unlock_irqrestore(&dev->lock, flags);
259	return 0;
260}
261
262static void net2272_ep_reset(struct net2272_ep *ep)
263{
264	u8 tmp;
265
266	ep->desc = NULL;
267	INIT_LIST_HEAD(&ep->queue);
268
269	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
270	ep->ep.ops = &net2272_ep_ops;
271
272	/* disable irqs, endpoint */
273	net2272_ep_write(ep, EP_IRQENB, 0);
274
275	/* init to our chosen defaults, notably so that we NAK OUT
276	 * packets until the driver queues a read.
277	 */
278	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
279	net2272_ep_write(ep, EP_RSPSET, tmp);
280
281	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
282	if (ep->num != 0)
283		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
284
285	net2272_ep_write(ep, EP_RSPCLR, tmp);
286
287	/* scrub most status bits, and flush any fifo state */
288	net2272_ep_write(ep, EP_STAT0,
289			  (1 << DATA_IN_TOKEN_INTERRUPT)
290			| (1 << DATA_OUT_TOKEN_INTERRUPT)
291			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
292			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
293			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
294
295	net2272_ep_write(ep, EP_STAT1,
296			    (1 << TIMEOUT)
297			  | (1 << USB_OUT_ACK_SENT)
298			  | (1 << USB_OUT_NAK_SENT)
299			  | (1 << USB_IN_ACK_RCVD)
300			  | (1 << USB_IN_NAK_SENT)
301			  | (1 << USB_STALL_SENT)
302			  | (1 << LOCAL_OUT_ZLP)
303			  | (1 << BUFFER_FLUSH));
304
	/* fifo size is handled separately */
306}
307
308static int net2272_disable(struct usb_ep *_ep)
309{
310	struct net2272_ep *ep;
311	unsigned long flags;
312
313	ep = container_of(_ep, struct net2272_ep, ep);
314	if (!_ep || !ep->desc || _ep->name == ep0name)
315		return -EINVAL;
316
317	spin_lock_irqsave(&ep->dev->lock, flags);
318	net2272_dequeue_all(ep);
319	net2272_ep_reset(ep);
320
321	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
322
323	spin_unlock_irqrestore(&ep->dev->lock, flags);
324	return 0;
325}
326
327/*---------------------------------------------------------------------------*/
328
329static struct usb_request *
330net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
331{
332	struct net2272_ep *ep;
333	struct net2272_request *req;
334
335	if (!_ep)
336		return NULL;
337	ep = container_of(_ep, struct net2272_ep, ep);
338
339	req = kzalloc(sizeof(*req), gfp_flags);
340	if (!req)
341		return NULL;
342
343	INIT_LIST_HEAD(&req->queue);
344
345	return &req->req;
346}
347
348static void
349net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
350{
351	struct net2272_ep *ep;
352	struct net2272_request *req;
353
354	ep = container_of(_ep, struct net2272_ep, ep);
355	if (!_ep || !_req)
356		return;
357
358	req = container_of(_req, struct net2272_request, req);
359	WARN_ON(!list_empty(&req->queue));
360	kfree(req);
361}
362
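/*
 * Complete a request: unlink it from the endpoint queue, record its final
 * status, undo any DMA mapping, and hand it back to the gadget driver with
 * the device lock temporarily dropped around the completion callback.
 */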
363static void
364net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
365{
366	struct net2272 *dev;
367	unsigned stopped = ep->stopped;
368
369	if (ep->num == 0) {
370		if (ep->dev->protocol_stall) {
371			ep->stopped = 1;
372			set_halt(ep);
373		}
374		allow_status(ep);
375	}
376
377	list_del_init(&req->queue);
378
379	if (req->req.status == -EINPROGRESS)
380		req->req.status = status;
381	else
382		status = req->req.status;
383
384	dev = ep->dev;
385	if (use_dma && ep->dma)
386		usb_gadget_unmap_request(&dev->gadget, &req->req,
387				ep->is_in);
388
389	if (status && status != -ESHUTDOWN)
390		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
391			ep->ep.name, &req->req, status,
392			req->req.actual, req->req.length, req->req.buf);
393
394	/* don't modify queue heads during completion callback */
395	ep->stopped = 1;
396	spin_unlock(&dev->lock);
397	usb_gadget_giveback_request(&ep->ep, &req->req);
398	spin_lock(&dev->lock);
399	ep->stopped = stopped;
400}
401
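/*
 * Load up to 'max' bytes of the current request into the endpoint FIFO,
 * 16 bits at a time; a trailing odd byte is written by briefly switching
 * the chip into 8-bit mode.  Returns the number of bytes written.
 */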
402static int
403net2272_write_packet(struct net2272_ep *ep, u8 *buf,
404	struct net2272_request *req, unsigned max)
405{
406	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
407	u16 *bufp;
408	unsigned length, count;
409	u8 tmp;
410
411	length = min(req->req.length - req->req.actual, max);
412	req->req.actual += length;
413
414	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
415		ep->ep.name, req, max, length,
416		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
417
418	count = length;
419	bufp = (u16 *)buf;
420
421	while (likely(count >= 2)) {
422		/* no byte-swap required; chip endian set during init */
423		writew(*bufp++, ep_data);
424		count -= 2;
425	}
426	buf = (u8 *)bufp;
427
428	/* write final byte by placing the NET2272 into 8-bit mode */
429	if (unlikely(count)) {
430		tmp = net2272_read(ep->dev, LOCCTL);
431		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
432		writeb(*buf, ep_data);
433		net2272_write(ep->dev, LOCCTL, tmp);
434	}
435	return length;
436}
437
438/* returns: 0: still running, 1: completed, negative: errno */
439static int
440net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
441{
442	u8 *buf;
443	unsigned count, max;
444	int status;
445
446	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
447		ep->ep.name, req->req.actual, req->req.length);
448
449	/*
450	 * Keep loading the endpoint until the final packet is loaded,
451	 * or the endpoint buffer is full.
452	 */
453 top:
454	/*
455	 * Clear interrupt status
456	 *  - Packet Transmitted interrupt will become set again when the
457	 *    host successfully takes another packet
458	 */
459	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
460	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
461		buf = req->req.buf + req->req.actual;
462		prefetch(buf);
463
464		/* force pagesel */
465		net2272_ep_read(ep, EP_STAT0);
466
467		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
468			(net2272_ep_read(ep, EP_AVAIL0));
469
470		if (max < ep->ep.maxpacket)
471			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
472				| (net2272_ep_read(ep, EP_AVAIL0));
473
474		count = net2272_write_packet(ep, buf, req, max);
475		/* see if we are done */
476		if (req->req.length == req->req.actual) {
477			/* validate short or zlp packet */
478			if (count < ep->ep.maxpacket)
479				set_fifo_bytecount(ep, 0);
480			net2272_done(ep, req, 0);
481
482			if (!list_empty(&ep->queue)) {
483				req = list_entry(ep->queue.next,
484						struct net2272_request,
485						queue);
486				status = net2272_kick_dma(ep, req);
487
488				if (status < 0)
489					if ((net2272_ep_read(ep, EP_STAT0)
490							& (1 << BUFFER_EMPTY)))
491						goto top;
492			}
493			return 1;
494		}
495		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
496	}
497	return 0;
498}
499
500static void
501net2272_out_flush(struct net2272_ep *ep)
502{
503	ASSERT_OUT_NAKING(ep);
504
505	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
506			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
507	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
508}
509
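/*
 * Drain up to 'avail' bytes from the endpoint FIFO into the request buffer,
 * 16 bits at a time.  Returns nonzero if this was a short (or zero-length)
 * packet, which terminates the transfer.
 */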
510static int
511net2272_read_packet(struct net2272_ep *ep, u8 *buf,
512	struct net2272_request *req, unsigned avail)
513{
514	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
515	unsigned is_short;
516	u16 *bufp;
517
518	req->req.actual += avail;
519
520	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
521		ep->ep.name, req, avail,
522		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
523
524	is_short = (avail < ep->ep.maxpacket);
525
526	if (unlikely(avail == 0)) {
527		/* remove any zlp from the buffer */
528		(void)readw(ep_data);
529		return is_short;
530	}
531
532	/* Ensure we get the final byte */
533	if (unlikely(avail % 2))
534		avail++;
535	bufp = (u16 *)buf;
536
537	do {
538		*bufp++ = readw(ep_data);
539		avail -= 2;
540	} while (avail);
541
	/*
	 * To avoid a false endpoint-available race condition, EP_STAT0 must be
	 * read twice in the case of a short transfer.
	 */
546	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
547		net2272_ep_read(ep, EP_STAT0);
548
549	return is_short;
550}
551
552static int
553net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
554{
555	u8 *buf;
556	unsigned is_short;
557	int count;
558	int tmp;
559	int cleanup = 0;
560	int status = -1;
561
562	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
563		ep->ep.name, req->req.actual, req->req.length);
564
565 top:
566	do {
567		buf = req->req.buf + req->req.actual;
568		prefetchw(buf);
569
570		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
571			| net2272_ep_read(ep, EP_AVAIL0);
572
573		net2272_ep_write(ep, EP_STAT0,
574			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
575			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
576
577		tmp = req->req.length - req->req.actual;
578
579		if (count > tmp) {
580			if ((tmp % ep->ep.maxpacket) != 0) {
581				dev_err(ep->dev->dev,
582					"%s out fifo %d bytes, expected %d\n",
583					ep->ep.name, count, tmp);
584				cleanup = 1;
585			}
586			count = (tmp > 0) ? tmp : 0;
587		}
588
589		is_short = net2272_read_packet(ep, buf, req, count);
590
591		/* completion */
592		if (unlikely(cleanup || is_short ||
593				((req->req.actual == req->req.length)
594				 && !req->req.zero))) {
595
596			if (cleanup) {
597				net2272_out_flush(ep);
598				net2272_done(ep, req, -EOVERFLOW);
599			} else
600				net2272_done(ep, req, 0);
601
602			/* re-initialize endpoint transfer registers
603			 * otherwise they may result in erroneous pre-validation
604			 * for subsequent control reads
605			 */
606			if (unlikely(ep->num == 0)) {
607				net2272_ep_write(ep, EP_TRANSFER2, 0);
608				net2272_ep_write(ep, EP_TRANSFER1, 0);
609				net2272_ep_write(ep, EP_TRANSFER0, 0);
610			}
611
612			if (!list_empty(&ep->queue)) {
613				req = list_entry(ep->queue.next,
614					struct net2272_request, queue);
615				status = net2272_kick_dma(ep, req);
616				if ((status < 0) &&
617				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
618					goto top;
619			}
620			return 1;
621		}
622	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
623
624	return 0;
625}
626
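/* Continue the transfer at the head of the endpoint queue using PIO. */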
627static void
628net2272_pio_advance(struct net2272_ep *ep)
629{
630	struct net2272_request *req;
631
632	if (unlikely(list_empty(&ep->queue)))
633		return;
634
635	req = list_entry(ep->queue.next, struct net2272_request, queue);
636	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
637}
638
639/* returns 0 on success, else negative errno */
640static int
641net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
642	unsigned len, unsigned dir)
643{
644	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
645		ep, buf, len, dir);
646
647	/* The NET2272 only supports a single dma channel */
648	if (dev->dma_busy)
649		return -EBUSY;
650	/*
651	 * EP_TRANSFER (used to determine the number of bytes received
652	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
653	 */
654	if ((dir == 1) && (len > 0x1000000))
655		return -EINVAL;
656
657	dev->dma_busy = 1;
658
659	/* initialize platform's dma */
660#ifdef CONFIG_PCI
661	/* NET2272 addr, buffer addr, length, etc. */
662	switch (dev->dev_id) {
663	case PCI_DEVICE_ID_RDK1:
664		/* Setup PLX 9054 DMA mode */
665		writel((1 << LOCAL_BUS_WIDTH) |
666			(1 << TA_READY_INPUT_ENABLE) |
667			(0 << LOCAL_BURST_ENABLE) |
668			(1 << DONE_INTERRUPT_ENABLE) |
669			(1 << LOCAL_ADDRESSING_MODE) |
670			(1 << DEMAND_MODE) |
671			(1 << DMA_EOT_ENABLE) |
672			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
673			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
674			dev->rdk1.plx9054_base_addr + DMAMODE0);
675
676		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
677		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
678		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
679		writel((dir << DIRECTION_OF_TRANSFER) |
680			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
681			dev->rdk1.plx9054_base_addr + DMADPR0);
682		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
683			readl(dev->rdk1.plx9054_base_addr + INTCSR),
684			dev->rdk1.plx9054_base_addr + INTCSR);
685
686		break;
687	}
688#endif
689
690	net2272_write(dev, DMAREQ,
691		(0 << DMA_BUFFER_VALID) |
692		(1 << DMA_REQUEST_ENABLE) |
693		(1 << DMA_CONTROL_DACK) |
694		(dev->dma_eot_polarity << EOT_POLARITY) |
695		(dev->dma_dack_polarity << DACK_POLARITY) |
696		(dev->dma_dreq_polarity << DREQ_POLARITY) |
697		((ep >> 1) << DMA_ENDPOINT_SELECT));
698
699	(void) net2272_read(dev, SCRATCH);
700
701	return 0;
702}
703
704static void
705net2272_start_dma(struct net2272 *dev)
706{
707	/* start platform's dma controller */
708#ifdef CONFIG_PCI
709	switch (dev->dev_id) {
710	case PCI_DEVICE_ID_RDK1:
711		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
712			dev->rdk1.plx9054_base_addr + DMACSR0);
713		break;
714	}
715#endif
716}
717
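/*
 * Try to hand the request at the head of the queue to the external DMA
 * controller.  Fails (so the caller falls back to PIO) for odd-length
 * transfers, while the single DMA channel is busy, or when a short packet
 * is already sitting in an OUT buffer.
 */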
718/* returns 0 on success, else negative errno */
719static int
720net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
721{
722	unsigned size;
723	u8 tmp;
724
725	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
726		return -EINVAL;
727
728	/* don't use dma for odd-length transfers
729	 * otherwise, we'd need to deal with the last byte with pio
730	 */
731	if (req->req.length & 1)
732		return -EINVAL;
733
734	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
735		ep->ep.name, req, (unsigned long long) req->req.dma);
736
737	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
738
739	/* The NET2272 can only use DMA on one endpoint at a time */
740	if (ep->dev->dma_busy)
741		return -EBUSY;
742
743	/* Make sure we only DMA an even number of bytes (we'll use
744	 * pio to complete the transfer)
745	 */
746	size = req->req.length;
747	size &= ~1;
748
749	/* device-to-host transfer */
750	if (ep->is_in) {
751		/* initialize platform's dma controller */
752		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
753			/* unable to obtain DMA channel; return error and use pio mode */
754			return -EBUSY;
755		req->req.actual += size;
756
757	/* host-to-device transfer */
758	} else {
759		tmp = net2272_ep_read(ep, EP_STAT0);
760
761		/* initialize platform's dma controller */
762		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
763			/* unable to obtain DMA channel; return error and use pio mode */
764			return -EBUSY;
765
766		if (!(tmp & (1 << BUFFER_EMPTY)))
767			ep->not_empty = 1;
768		else
769			ep->not_empty = 0;
770
771
772		/* allow the endpoint's buffer to fill */
773		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
774
775		/* this transfer completed and data's already in the fifo
776		 * return error so pio gets used.
777		 */
778		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
779
780			/* deassert dreq */
781			net2272_write(ep->dev, DMAREQ,
782				(0 << DMA_BUFFER_VALID) |
783				(0 << DMA_REQUEST_ENABLE) |
784				(1 << DMA_CONTROL_DACK) |
785				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
786				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
787				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
788				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
789
790			return -EBUSY;
791		}
792	}
793
794	/* Don't use per-packet interrupts: use dma interrupts only */
795	net2272_ep_write(ep, EP_IRQENB, 0);
796
797	net2272_start_dma(ep->dev);
798
799	return 0;
800}
801
802static void net2272_cancel_dma(struct net2272 *dev)
803{
804#ifdef CONFIG_PCI
805	switch (dev->dev_id) {
806	case PCI_DEVICE_ID_RDK1:
807		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
808		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
809		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
810		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */
812
813		/* dma abort generates an interrupt */
814		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
815			dev->rdk1.plx9054_base_addr + DMACSR0);
816		break;
817	}
818#endif
819
820	dev->dma_busy = 0;
821}
822
823/*---------------------------------------------------------------------------*/
824
825static int
826net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
827{
828	struct net2272_request *req;
829	struct net2272_ep *ep;
830	struct net2272 *dev;
831	unsigned long flags;
832	int status = -1;
833	u8 s;
834
835	req = container_of(_req, struct net2272_request, req);
836	if (!_req || !_req->complete || !_req->buf
837			|| !list_empty(&req->queue))
838		return -EINVAL;
839	ep = container_of(_ep, struct net2272_ep, ep);
840	if (!_ep || (!ep->desc && ep->num != 0))
841		return -EINVAL;
842	dev = ep->dev;
843	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
844		return -ESHUTDOWN;
845
846	/* set up dma mapping in case the caller didn't */
847	if (use_dma && ep->dma) {
848		status = usb_gadget_map_request(&dev->gadget, _req,
849				ep->is_in);
850		if (status)
851			return status;
852	}
853
854	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
855		_ep->name, _req, _req->length, _req->buf,
856		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
857
858	spin_lock_irqsave(&dev->lock, flags);
859
860	_req->status = -EINPROGRESS;
861	_req->actual = 0;
862
863	/* kickstart this i/o queue? */
864	if (list_empty(&ep->queue) && !ep->stopped) {
865		/* maybe there's no control data, just status ack */
866		if (ep->num == 0 && _req->length == 0) {
867			net2272_done(ep, req, 0);
868			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
869			goto done;
870		}
871
872		/* Return zlp, don't let it block subsequent packets */
873		s = net2272_ep_read(ep, EP_STAT0);
874		if (s & (1 << BUFFER_EMPTY)) {
			/* The buffer is empty; check for a blocking zlp and handle it */
876			if ((s & (1 << NAK_OUT_PACKETS)) &&
877			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
878				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
879				/*
880				 * Request is going to terminate with a short packet ...
881				 * hope the client is ready for it!
882				 */
883				status = net2272_read_fifo(ep, req);
884				/* clear short packet naking */
885				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
886				goto done;
887			}
888		}
889
890		/* try dma first */
891		status = net2272_kick_dma(ep, req);
892
893		if (status < 0) {
894			/* dma failed (most likely in use by another endpoint)
895			 * fallback to pio
896			 */
897			status = 0;
898
899			if (ep->is_in)
900				status = net2272_write_fifo(ep, req);
901			else {
902				s = net2272_ep_read(ep, EP_STAT0);
903				if ((s & (1 << BUFFER_EMPTY)) == 0)
904					status = net2272_read_fifo(ep, req);
905			}
906
907			if (unlikely(status != 0)) {
908				if (status > 0)
909					status = 0;
910				req = NULL;
911			}
912		}
913	}
914	if (likely(req))
915		list_add_tail(&req->queue, &ep->queue);
916
917	if (likely(!list_empty(&ep->queue)))
918		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
919 done:
920	spin_unlock_irqrestore(&dev->lock, flags);
921
922	return 0;
923}
924
925/* dequeue ALL requests */
926static void
927net2272_dequeue_all(struct net2272_ep *ep)
928{
929	struct net2272_request *req;
930
931	/* called with spinlock held */
932	ep->stopped = 1;
933
934	while (!list_empty(&ep->queue)) {
935		req = list_entry(ep->queue.next,
936				struct net2272_request,
937				queue);
938		net2272_done(ep, req, -ESHUTDOWN);
939	}
940}
941
942/* dequeue JUST ONE request */
943static int
944net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945{
946	struct net2272_ep *ep;
947	struct net2272_request *req;
948	unsigned long flags;
949	int stopped;
950
951	ep = container_of(_ep, struct net2272_ep, ep);
952	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
953		return -EINVAL;
954
955	spin_lock_irqsave(&ep->dev->lock, flags);
956	stopped = ep->stopped;
957	ep->stopped = 1;
958
959	/* make sure it's still queued on this endpoint */
960	list_for_each_entry(req, &ep->queue, queue) {
961		if (&req->req == _req)
962			break;
963	}
964	if (&req->req != _req) {
965		spin_unlock_irqrestore(&ep->dev->lock, flags);
966		return -EINVAL;
967	}
968
969	/* queue head may be partially complete */
970	if (ep->queue.next == &req->queue) {
971		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
972		net2272_done(ep, req, -ECONNRESET);
973	}
974	req = NULL;
975	ep->stopped = stopped;
976
977	spin_unlock_irqrestore(&ep->dev->lock, flags);
978	return 0;
979}
980
981/*---------------------------------------------------------------------------*/
982
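/*
 * Set or clear an endpoint halt (stall).  A wedged halt is remembered so
 * that a host CLEAR_FEATURE(ENDPOINT_HALT) request will not clear it (see
 * the USB_REQ_CLEAR_FEATURE handling below).
 */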
983static int
984net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
985{
986	struct net2272_ep *ep;
987	unsigned long flags;
988	int ret = 0;
989
990	ep = container_of(_ep, struct net2272_ep, ep);
991	if (!_ep || (!ep->desc && ep->num != 0))
992		return -EINVAL;
993	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
994		return -ESHUTDOWN;
995	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
996		return -EINVAL;
997
998	spin_lock_irqsave(&ep->dev->lock, flags);
999	if (!list_empty(&ep->queue))
1000		ret = -EAGAIN;
1001	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1002		ret = -EAGAIN;
1003	else {
1004		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1005			value ? "set" : "clear",
1006			wedged ? "wedge" : "halt");
1007		/* set/clear */
1008		if (value) {
1009			if (ep->num == 0)
1010				ep->dev->protocol_stall = 1;
1011			else
1012				set_halt(ep);
1013			if (wedged)
1014				ep->wedged = 1;
1015		} else {
1016			clear_halt(ep);
1017			ep->wedged = 0;
1018		}
1019	}
1020	spin_unlock_irqrestore(&ep->dev->lock, flags);
1021
1022	return ret;
1023}
1024
1025static int
1026net2272_set_halt(struct usb_ep *_ep, int value)
1027{
1028	return net2272_set_halt_and_wedge(_ep, value, 0);
1029}
1030
1031static int
1032net2272_set_wedge(struct usb_ep *_ep)
1033{
1034	if (!_ep || _ep->name == ep0name)
1035		return -EINVAL;
1036	return net2272_set_halt_and_wedge(_ep, 1, 1);
1037}
1038
1039static int
1040net2272_fifo_status(struct usb_ep *_ep)
1041{
1042	struct net2272_ep *ep;
1043	u16 avail;
1044
1045	ep = container_of(_ep, struct net2272_ep, ep);
1046	if (!_ep || (!ep->desc && ep->num != 0))
1047		return -ENODEV;
1048	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1049		return -ESHUTDOWN;
1050
1051	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1052	avail |= net2272_ep_read(ep, EP_AVAIL0);
1053	if (avail > ep->fifo_size)
1054		return -EOVERFLOW;
1055	if (ep->is_in)
1056		avail = ep->fifo_size - avail;
1057	return avail;
1058}
1059
1060static void
1061net2272_fifo_flush(struct usb_ep *_ep)
1062{
1063	struct net2272_ep *ep;
1064
1065	ep = container_of(_ep, struct net2272_ep, ep);
1066	if (!_ep || (!ep->desc && ep->num != 0))
1067		return;
1068	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1069		return;
1070
1071	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1072}
1073
1074static struct usb_ep_ops net2272_ep_ops = {
1075	.enable        = net2272_enable,
1076	.disable       = net2272_disable,
1077
1078	.alloc_request = net2272_alloc_request,
1079	.free_request  = net2272_free_request,
1080
1081	.queue         = net2272_queue,
1082	.dequeue       = net2272_dequeue,
1083
1084	.set_halt      = net2272_set_halt,
1085	.set_wedge     = net2272_set_wedge,
1086	.fifo_status   = net2272_fifo_status,
1087	.fifo_flush    = net2272_fifo_flush,
1088};
1089
1090/*---------------------------------------------------------------------------*/
1091
1092static int
1093net2272_get_frame(struct usb_gadget *_gadget)
1094{
1095	struct net2272 *dev;
1096	unsigned long flags;
1097	u16 ret;
1098
1099	if (!_gadget)
1100		return -ENODEV;
1101	dev = container_of(_gadget, struct net2272, gadget);
1102	spin_lock_irqsave(&dev->lock, flags);
1103
1104	ret = net2272_read(dev, FRAME1) << 8;
1105	ret |= net2272_read(dev, FRAME0);
1106
1107	spin_unlock_irqrestore(&dev->lock, flags);
1108	return ret;
1109}
1110
1111static int
1112net2272_wakeup(struct usb_gadget *_gadget)
1113{
1114	struct net2272 *dev;
1115	u8 tmp;
1116	unsigned long flags;
1117
1118	if (!_gadget)
1119		return 0;
1120	dev = container_of(_gadget, struct net2272, gadget);
1121
1122	spin_lock_irqsave(&dev->lock, flags);
1123	tmp = net2272_read(dev, USBCTL0);
1124	if (tmp & (1 << IO_WAKEUP_ENABLE))
1125		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1126
1127	spin_unlock_irqrestore(&dev->lock, flags);
1128
1129	return 0;
1130}
1131
1132static int
1133net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1134{
1135	if (!_gadget)
1136		return -ENODEV;
1137
1138	_gadget->is_selfpowered = (value != 0);
1139
1140	return 0;
1141}
1142
1143static int
1144net2272_pullup(struct usb_gadget *_gadget, int is_on)
1145{
1146	struct net2272 *dev;
1147	u8 tmp;
1148	unsigned long flags;
1149
1150	if (!_gadget)
1151		return -ENODEV;
1152	dev = container_of(_gadget, struct net2272, gadget);
1153
1154	spin_lock_irqsave(&dev->lock, flags);
1155	tmp = net2272_read(dev, USBCTL0);
1156	dev->softconnect = (is_on != 0);
1157	if (is_on)
1158		tmp |= (1 << USB_DETECT_ENABLE);
1159	else
1160		tmp &= ~(1 << USB_DETECT_ENABLE);
1161	net2272_write(dev, USBCTL0, tmp);
1162	spin_unlock_irqrestore(&dev->lock, flags);
1163
1164	return 0;
1165}
1166
1167static int net2272_start(struct usb_gadget *_gadget,
1168		struct usb_gadget_driver *driver);
1169static int net2272_stop(struct usb_gadget *_gadget);
1170
1171static const struct usb_gadget_ops net2272_ops = {
1172	.get_frame	= net2272_get_frame,
1173	.wakeup		= net2272_wakeup,
1174	.set_selfpowered = net2272_set_selfpowered,
1175	.pullup		= net2272_pullup,
1176	.udc_start	= net2272_start,
1177	.udc_stop	= net2272_stop,
1178};
1179
1180/*---------------------------------------------------------------------------*/
1181
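/*
 * sysfs "registers" attribute: dump the main control, DMA, USB control and
 * per-endpoint registers for debugging.
 */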
1182static ssize_t
1183registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1184{
1185	struct net2272 *dev;
1186	char *next;
1187	unsigned size, t;
1188	unsigned long flags;
1189	u8 t1, t2;
1190	int i;
1191	const char *s;
1192
1193	dev = dev_get_drvdata(_dev);
1194	next = buf;
1195	size = PAGE_SIZE;
1196	spin_lock_irqsave(&dev->lock, flags);
1197
1198	if (dev->driver)
1199		s = dev->driver->driver.name;
1200	else
1201		s = "(none)";
1202
1203	/* Main Control Registers */
1204	t = scnprintf(next, size, "%s version %s,"
1205		"chiprev %02x, locctl %02x\n"
1206		"irqenb0 %02x irqenb1 %02x "
1207		"irqstat0 %02x irqstat1 %02x\n",
1208		driver_name, driver_vers, dev->chiprev,
1209		net2272_read(dev, LOCCTL),
1210		net2272_read(dev, IRQENB0),
1211		net2272_read(dev, IRQENB1),
1212		net2272_read(dev, IRQSTAT0),
1213		net2272_read(dev, IRQSTAT1));
1214	size -= t;
1215	next += t;
1216
1217	/* DMA */
1218	t1 = net2272_read(dev, DMAREQ);
1219	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1220		t1, ep_name[(t1 & 0x01) + 1],
1221		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1222		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1223		t1 & (1 << DMA_REQUEST) ? "req " : "",
1224		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1225	size -= t;
1226	next += t;
1227
1228	/* USB Control Registers */
1229	t1 = net2272_read(dev, USBCTL1);
1230	if (t1 & (1 << VBUS_PIN)) {
1231		if (t1 & (1 << USB_HIGH_SPEED))
1232			s = "high speed";
1233		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1234			s = "powered";
1235		else
1236			s = "full speed";
1237	} else
1238		s = "not attached";
1239	t = scnprintf(next, size,
1240		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1241		net2272_read(dev, USBCTL0), t1,
1242		net2272_read(dev, OURADDR), s);
1243	size -= t;
1244	next += t;
1245
1246	/* Endpoint Registers */
1247	for (i = 0; i < 4; ++i) {
1248		struct net2272_ep *ep;
1249
1250		ep = &dev->ep[i];
1251		if (i && !ep->desc)
1252			continue;
1253
1254		t1 = net2272_ep_read(ep, EP_CFG);
1255		t2 = net2272_ep_read(ep, EP_RSPSET);
1256		t = scnprintf(next, size,
1257			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1258			"irqenb %02x\n",
1259			ep->ep.name, t1, t2,
1260			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1261			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1262			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1263			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1264			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1265			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1266			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1267			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1268			net2272_ep_read(ep, EP_IRQENB));
1269		size -= t;
1270		next += t;
1271
1272		t = scnprintf(next, size,
1273			"\tstat0 %02x stat1 %02x avail %04x "
1274			"(ep%d%s-%s)%s\n",
1275			net2272_ep_read(ep, EP_STAT0),
1276			net2272_ep_read(ep, EP_STAT1),
1277			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1278			t1 & 0x0f,
1279			ep->is_in ? "in" : "out",
1280			type_string(t1 >> 5),
1281			ep->stopped ? "*" : "");
1282		size -= t;
1283		next += t;
1284
1285		t = scnprintf(next, size,
1286			"\tep_transfer %06x\n",
1287			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1288			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1289			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1290		size -= t;
1291		next += t;
1292
1293		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1294		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1295		t = scnprintf(next, size,
1296			"\tbuf-a %s buf-b %s\n",
1297			buf_state_string(t1),
1298			buf_state_string(t2));
1299		size -= t;
1300		next += t;
1301	}
1302
1303	spin_unlock_irqrestore(&dev->lock, flags);
1304
1305	return PAGE_SIZE - size;
1306}
1307static DEVICE_ATTR_RO(registers);
1308
1309/*---------------------------------------------------------------------------*/
1310
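/*
 * Program the FIFO configuration field in LOCCTL and rebuild the gadget's
 * endpoint list to match (see the fifo_mode table near the top of this
 * file; mode 3 leaves ep-b disabled).
 */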
1311static void
1312net2272_set_fifo_mode(struct net2272 *dev, int mode)
1313{
1314	u8 tmp;
1315
1316	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1317	tmp |= (mode << 6);
1318	net2272_write(dev, LOCCTL, tmp);
1319
1320	INIT_LIST_HEAD(&dev->gadget.ep_list);
1321
1322	/* always ep-a, ep-c ... maybe not ep-b */
1323	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1324
1325	switch (mode) {
1326	case 0:
1327		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1328		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1329		break;
1330	case 1:
1331		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1332		dev->ep[1].fifo_size = 1024;
1333		dev->ep[2].fifo_size = 512;
1334		break;
1335	case 2:
1336		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1337		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1338		break;
1339	case 3:
1340		dev->ep[1].fifo_size = 1024;
1341		break;
1342	}
1343
1344	/* ep-c is always 2 512 byte buffers */
1345	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1346	dev->ep[3].fifo_size = 512;
1347}
1348
1349/*---------------------------------------------------------------------------*/
1350
1351static void
1352net2272_usb_reset(struct net2272 *dev)
1353{
1354	dev->gadget.speed = USB_SPEED_UNKNOWN;
1355
1356	net2272_cancel_dma(dev);
1357
1358	net2272_write(dev, IRQENB0, 0);
1359	net2272_write(dev, IRQENB1, 0);
1360
1361	/* clear irq state */
1362	net2272_write(dev, IRQSTAT0, 0xff);
1363	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1364
1365	net2272_write(dev, DMAREQ,
1366		(0 << DMA_BUFFER_VALID) |
1367		(0 << DMA_REQUEST_ENABLE) |
1368		(1 << DMA_CONTROL_DACK) |
1369		(dev->dma_eot_polarity << EOT_POLARITY) |
1370		(dev->dma_dack_polarity << DACK_POLARITY) |
1371		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1372		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1373
1374	net2272_cancel_dma(dev);
1375	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1376
	/*
	 * Set the NET2272 ep fifo data width to 16-bit mode for correct byte
	 * swapping.  Note that the higher level gadget drivers are expected to
	 * convert data to little endian.  Enable byte swap for your local
	 * bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
	 */
1381	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1382	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1383}
1384
1385static void
1386net2272_usb_reinit(struct net2272 *dev)
1387{
1388	int i;
1389
1390	/* basic endpoint init */
1391	for (i = 0; i < 4; ++i) {
1392		struct net2272_ep *ep = &dev->ep[i];
1393
1394		ep->ep.name = ep_name[i];
1395		ep->dev = dev;
1396		ep->num = i;
1397		ep->not_empty = 0;
1398
1399		if (use_dma && ep->num == dma_ep)
1400			ep->dma = 1;
1401
1402		if (i > 0 && i <= 3)
1403			ep->fifo_size = 512;
1404		else
1405			ep->fifo_size = 64;
1406		net2272_ep_reset(ep);
1407	}
1408	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1409
1410	dev->gadget.ep0 = &dev->ep[0].ep;
1411	dev->ep[0].stopped = 0;
1412	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1413}
1414
1415static void
1416net2272_ep0_start(struct net2272 *dev)
1417{
1418	struct net2272_ep *ep0 = &dev->ep[0];
1419
1420	net2272_ep_write(ep0, EP_RSPSET,
1421		(1 << NAK_OUT_PACKETS_MODE) |
1422		(1 << ALT_NAK_OUT_PACKETS));
1423	net2272_ep_write(ep0, EP_RSPCLR,
1424		(1 << HIDE_STATUS_PHASE) |
1425		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1426	net2272_write(dev, USBCTL0,
1427		(dev->softconnect << USB_DETECT_ENABLE) |
1428		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1429		(1 << IO_WAKEUP_ENABLE));
1430	net2272_write(dev, IRQENB0,
1431		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1432		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1433		(1 << DMA_DONE_INTERRUPT_ENABLE));
1434	net2272_write(dev, IRQENB1,
1435		(1 << VBUS_INTERRUPT_ENABLE) |
1436		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1437		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1438}
1439
1440/* when a driver is successfully registered, it will receive
1441 * control requests including set_configuration(), which enables
1442 * non-control requests.  then usb traffic follows until a
1443 * disconnect is reported.  then a host may connect again, or
1444 * the driver might get unbound.
1445 */
1446static int net2272_start(struct usb_gadget *_gadget,
1447		struct usb_gadget_driver *driver)
1448{
1449	struct net2272 *dev;
1450	unsigned i;
1451
1452	if (!driver || !driver->setup ||
1453	    driver->max_speed != USB_SPEED_HIGH)
1454		return -EINVAL;
1455
1456	dev = container_of(_gadget, struct net2272, gadget);
1457
1458	for (i = 0; i < 4; ++i)
1459		dev->ep[i].irqs = 0;
1460	/* hook up the driver ... */
1461	dev->softconnect = 1;
1462	driver->driver.bus = NULL;
1463	dev->driver = driver;
1464
1465	/* ... then enable host detection and ep0; and we're ready
1466	 * for set_configuration as well as eventual disconnect.
1467	 */
1468	net2272_ep0_start(dev);
1469
1470	return 0;
1471}
1472
1473static void
1474stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1475{
1476	int i;
1477
1478	/* don't disconnect if it's not connected */
1479	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1480		driver = NULL;
1481
1482	/* stop hardware; prevent new request submissions;
1483	 * and kill any outstanding requests.
1484	 */
1485	net2272_usb_reset(dev);
1486	for (i = 0; i < 4; ++i)
1487		net2272_dequeue_all(&dev->ep[i]);
1488
1489	/* report disconnect; the driver is already quiesced */
1490	if (driver) {
1491		spin_unlock(&dev->lock);
1492		driver->disconnect(&dev->gadget);
1493		spin_lock(&dev->lock);
1494	}
1495
1496	net2272_usb_reinit(dev);
1497}
1498
1499static int net2272_stop(struct usb_gadget *_gadget)
1500{
1501	struct net2272 *dev;
1502	unsigned long flags;
1503
1504	dev = container_of(_gadget, struct net2272, gadget);
1505
1506	spin_lock_irqsave(&dev->lock, flags);
1507	stop_activity(dev, NULL);
1508	spin_unlock_irqrestore(&dev->lock, flags);
1509
1510	dev->driver = NULL;
1511
1512	return 0;
1513}
1514
1515/*---------------------------------------------------------------------------*/
1516/* handle ep-a/ep-b dma completions */
1517static void
1518net2272_handle_dma(struct net2272_ep *ep)
1519{
1520	struct net2272_request *req;
1521	unsigned len;
1522	int status;
1523
1524	if (!list_empty(&ep->queue))
1525		req = list_entry(ep->queue.next,
1526				struct net2272_request, queue);
1527	else
1528		req = NULL;
1529
1530	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1531
1532	/* Ensure DREQ is de-asserted */
1533	net2272_write(ep->dev, DMAREQ,
1534		(0 << DMA_BUFFER_VALID)
1535	      | (0 << DMA_REQUEST_ENABLE)
1536	      | (1 << DMA_CONTROL_DACK)
1537	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1538	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1539	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1540	      | (ep->dma << DMA_ENDPOINT_SELECT));
1541
1542	ep->dev->dma_busy = 0;
1543
1544	net2272_ep_write(ep, EP_IRQENB,
1545		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1546		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1547		| net2272_ep_read(ep, EP_IRQENB));
1548
1549	/* device-to-host transfer completed */
1550	if (ep->is_in) {
1551		/* validate a short packet or zlp if necessary */
1552		if ((req->req.length % ep->ep.maxpacket != 0) ||
1553				req->req.zero)
1554			set_fifo_bytecount(ep, 0);
1555
1556		net2272_done(ep, req, 0);
1557		if (!list_empty(&ep->queue)) {
1558			req = list_entry(ep->queue.next,
1559					struct net2272_request, queue);
1560			status = net2272_kick_dma(ep, req);
1561			if (status < 0)
1562				net2272_pio_advance(ep);
1563		}
1564
1565	/* host-to-device transfer completed */
1566	} else {
1567		/* terminated with a short packet? */
1568		if (net2272_read(ep->dev, IRQSTAT0) &
1569				(1 << DMA_DONE_INTERRUPT)) {
1570			/* abort system dma */
1571			net2272_cancel_dma(ep->dev);
1572		}
1573
1574		/* EP_TRANSFER will contain the number of bytes
1575		 * actually received.
1576		 * NOTE: There is no overflow detection on EP_TRANSFER:
1577		 * We can't deal with transfers larger than 2^24 bytes!
1578		 */
1579		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1580			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1581			| (net2272_ep_read(ep, EP_TRANSFER0));
1582
1583		if (ep->not_empty)
1584			len += 4;
1585
1586		req->req.actual += len;
1587
1588		/* get any remaining data */
1589		net2272_pio_advance(ep);
1590	}
1591}
1592
1593/*---------------------------------------------------------------------------*/
1594
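/*
 * Service a (non-setup) endpoint interrupt: acknowledge its status bits and
 * advance any pending PIO transfer in the appropriate direction.
 */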
1595static void
1596net2272_handle_ep(struct net2272_ep *ep)
1597{
1598	struct net2272_request *req;
1599	u8 stat0, stat1;
1600
1601	if (!list_empty(&ep->queue))
1602		req = list_entry(ep->queue.next,
1603			struct net2272_request, queue);
1604	else
1605		req = NULL;
1606
1607	/* ack all, and handle what we care about */
1608	stat0 = net2272_ep_read(ep, EP_STAT0);
1609	stat1 = net2272_ep_read(ep, EP_STAT1);
1610	ep->irqs++;
1611
1612	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1613		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1614
1615	net2272_ep_write(ep, EP_STAT0, stat0 &
1616		~((1 << NAK_OUT_PACKETS)
1617		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1618	net2272_ep_write(ep, EP_STAT1, stat1);
1619
1620	/* data packet(s) received (in the fifo, OUT)
1621	 * direction must be validated, otherwise control read status phase
1622	 * could be interpreted as a valid packet
1623	 */
1624	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1625		net2272_pio_advance(ep);
1626	/* data packet(s) transmitted (IN) */
1627	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1628		net2272_pio_advance(ep);
1629}
1630
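/* Map the wIndex of a control request onto the matching endpoint (or NULL). */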
1631static struct net2272_ep *
1632net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1633{
1634	struct net2272_ep *ep;
1635
1636	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1637		return &dev->ep[0];
1638
1639	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1640		u8 bEndpointAddress;
1641
1642		if (!ep->desc)
1643			continue;
1644		bEndpointAddress = ep->desc->bEndpointAddress;
1645		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1646			continue;
1647		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1648			return ep;
1649	}
1650	return NULL;
1651}
1652
1653/*
1654 * USB Test Packet:
1655 * JKJKJKJK * 9
1656 * JJKKJJKK * 8
1657 * JJJJKKKK * 8
1658 * JJJJJJJKKKKKKK * 8
1659 * JJJJJJJK * 8
1660 * {JKKKKKKK * 10}, JK
1661 */
1662static const u8 net2272_test_packet[] = {
1663	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1664	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1665	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1666	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1667	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1668	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1669};
1670
1671static void
1672net2272_set_test_mode(struct net2272 *dev, int mode)
1673{
1674	int i;
1675
1676	/* Disable all net2272 interrupts:
1677	 * Nothing but a power cycle should stop the test.
1678	 */
1679	net2272_write(dev, IRQENB0, 0x00);
1680	net2272_write(dev, IRQENB1, 0x00);
1681
	/* Force transceiver to high-speed */
1683	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1684
1685	net2272_write(dev, PAGESEL, 0);
1686	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1687	net2272_write(dev, EP_RSPCLR,
1688			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1689			| (1 << HIDE_STATUS_PHASE));
1690	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1691	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1692
1693	/* wait for status phase to complete */
1694	while (!(net2272_read(dev, EP_STAT0) &
1695				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1696		;
1697
1698	/* Enable test mode */
1699	net2272_write(dev, USBTEST, mode);
1700
1701	/* load test packet */
1702	if (mode == TEST_PACKET) {
1703		/* switch to 8 bit mode */
1704		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1705				~(1 << DATA_WIDTH));
1706
1707		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1708			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1709
1710		/* Validate test packet */
1711		net2272_write(dev, EP_TRANSFER0, 0);
1712	}
1713}
1714
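/*
 * Handle IRQSTAT0 sources: SETUP packets (a few standard requests are
 * answered here, the rest are delegated to the gadget driver), DMA
 * completion, and per-endpoint data interrupts.
 */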
1715static void
1716net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1717{
1718	struct net2272_ep *ep;
1719	u8 num, scratch;
1720
1721	/* starting a control request? */
1722	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1723		union {
1724			u8 raw[8];
1725			struct usb_ctrlrequest	r;
1726		} u;
1727		int tmp = 0;
1728		struct net2272_request *req;
1729
1730		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1731			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1732				dev->gadget.speed = USB_SPEED_HIGH;
1733			else
1734				dev->gadget.speed = USB_SPEED_FULL;
1735			dev_dbg(dev->dev, "%s\n",
1736				usb_speed_string(dev->gadget.speed));
1737		}
1738
1739		ep = &dev->ep[0];
1740		ep->irqs++;
1741
1742		/* make sure any leftover interrupt state is cleared */
1743		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1744		while (!list_empty(&ep->queue)) {
1745			req = list_entry(ep->queue.next,
1746				struct net2272_request, queue);
1747			net2272_done(ep, req,
1748				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1749		}
1750		ep->stopped = 0;
1751		dev->protocol_stall = 0;
1752		net2272_ep_write(ep, EP_STAT0,
1753			    (1 << DATA_IN_TOKEN_INTERRUPT)
1754			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1755			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1756			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1757			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1758		net2272_ep_write(ep, EP_STAT1,
1759			    (1 << TIMEOUT)
1760			  | (1 << USB_OUT_ACK_SENT)
1761			  | (1 << USB_OUT_NAK_SENT)
1762			  | (1 << USB_IN_ACK_RCVD)
1763			  | (1 << USB_IN_NAK_SENT)
1764			  | (1 << USB_STALL_SENT)
1765			  | (1 << LOCAL_OUT_ZLP));
1766
1767		/*
1768		 * Ensure Control Read pre-validation setting is beyond maximum size
1769		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1770		 *    an EP0 transfer following the Control Write is a Control Read,
1771		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1772		 *    pre-validation count.
1773		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation.
1775		 */
1776		net2272_write(dev, PAGESEL, 0);
1777		net2272_write(dev, EP_TRANSFER2, 0xff);
1778		net2272_write(dev, EP_TRANSFER1, 0xff);
1779		net2272_write(dev, EP_TRANSFER0, 0xff);
1780
1781		u.raw[0] = net2272_read(dev, SETUP0);
1782		u.raw[1] = net2272_read(dev, SETUP1);
1783		u.raw[2] = net2272_read(dev, SETUP2);
1784		u.raw[3] = net2272_read(dev, SETUP3);
1785		u.raw[4] = net2272_read(dev, SETUP4);
1786		u.raw[5] = net2272_read(dev, SETUP5);
1787		u.raw[6] = net2272_read(dev, SETUP6);
1788		u.raw[7] = net2272_read(dev, SETUP7);
1789		/*
1790		 * If you have a big endian cpu make sure le16_to_cpus
1791		 * performs the proper byte swapping here...
1792		 */
1793		le16_to_cpus(&u.r.wValue);
1794		le16_to_cpus(&u.r.wIndex);
1795		le16_to_cpus(&u.r.wLength);
1796
1797		/* ack the irq */
1798		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1799		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1800
1801		/* watch control traffic at the token level, and force
1802		 * synchronization before letting the status phase happen.
1803		 */
1804		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1805		if (ep->is_in) {
1806			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1807				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1808				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1809			stop_out_naking(ep);
1810		} else
1811			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1812				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814		net2272_ep_write(ep, EP_IRQENB, scratch);
1815
1816		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1817			goto delegate;
1818		switch (u.r.bRequest) {
1819		case USB_REQ_GET_STATUS: {
1820			struct net2272_ep *e;
1821			u16 status = 0;
1822
1823			switch (u.r.bRequestType & USB_RECIP_MASK) {
1824			case USB_RECIP_ENDPOINT:
1825				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1826				if (!e || u.r.wLength > 2)
1827					goto do_stall;
1828				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1829					status = __constant_cpu_to_le16(1);
1830				else
1831					status = __constant_cpu_to_le16(0);
1832
1833				/* don't bother with a request object! */
1834				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1835				writew(status, net2272_reg_addr(dev, EP_DATA));
1836				set_fifo_bytecount(&dev->ep[0], 0);
1837				allow_status(ep);
1838				dev_vdbg(dev->dev, "%s stat %02x\n",
1839					ep->ep.name, status);
1840				goto next_endpoints;
1841			case USB_RECIP_DEVICE:
1842				if (u.r.wLength > 2)
1843					goto do_stall;
1844				if (dev->gadget.is_selfpowered)
1845					status = (1 << USB_DEVICE_SELF_POWERED);
1846
1847				/* don't bother with a request object! */
1848				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849				writew(status, net2272_reg_addr(dev, EP_DATA));
1850				set_fifo_bytecount(&dev->ep[0], 0);
1851				allow_status(ep);
1852				dev_vdbg(dev->dev, "device stat %02x\n", status);
1853				goto next_endpoints;
1854			case USB_RECIP_INTERFACE:
1855				if (u.r.wLength > 2)
1856					goto do_stall;
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "interface status %02x\n", status);
1864				goto next_endpoints;
1865			}
1866
1867			break;
1868		}
1869		case USB_REQ_CLEAR_FEATURE: {
1870			struct net2272_ep *e;
1871
1872			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1873				goto delegate;
1874			if (u.r.wValue != USB_ENDPOINT_HALT ||
1875			    u.r.wLength != 0)
1876				goto do_stall;
1877			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1878			if (!e)
1879				goto do_stall;
1880			if (e->wedged) {
1881				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1882					ep->ep.name);
1883			} else {
1884				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1885				clear_halt(e);
1886			}
1887			allow_status(ep);
1888			goto next_endpoints;
1889		}
1890		case USB_REQ_SET_FEATURE: {
1891			struct net2272_ep *e;
1892
1893			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1894				if (u.r.wIndex != NORMAL_OPERATION)
1895					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1896				allow_status(ep);
1897				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1898				goto next_endpoints;
1899			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1900				goto delegate;
1901			if (u.r.wValue != USB_ENDPOINT_HALT ||
1902			    u.r.wLength != 0)
1903				goto do_stall;
1904			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1905			if (!e)
1906				goto do_stall;
1907			set_halt(e);
1908			allow_status(ep);
1909			dev_vdbg(dev->dev, "%s set halt\n", e->ep.name);
1910			goto next_endpoints;
1911		}
1912		case USB_REQ_SET_ADDRESS: {
1913			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1914			allow_status(ep);
1915			break;
1916		}
1917		default:
1918 delegate:
1919			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1920				"ep_cfg %08x\n",
1921				u.r.bRequestType, u.r.bRequest,
1922				u.r.wValue, u.r.wIndex,
1923				net2272_ep_read(ep, EP_CFG));
1924			spin_unlock(&dev->lock);
1925			tmp = dev->driver->setup(&dev->gadget, &u.r);
1926			spin_lock(&dev->lock);
1927		}
1928
1929		/* stall ep0 on error */
1930		if (tmp < 0) {
1931 do_stall:
1932			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1933				u.r.bRequestType, u.r.bRequest, tmp);
1934			dev->protocol_stall = 1;
1935		}
1936	/* endpoint dma irq? */
1937	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1938		net2272_cancel_dma(dev);
1939		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1940		stat &= ~(1 << DMA_DONE_INTERRUPT);
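		/* DMA_ENDPOINT_SELECT set means the completed transfer used ep-b
		 * (dev->ep[2]); clear means ep-a (dev->ep[1])
		 */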
1941		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1942			? 2 : 1;
1943
1944		ep = &dev->ep[num];
1945		net2272_handle_dma(ep);
1946	}
1947
1948 next_endpoints:
1949	/* endpoint data irq? */
1950	scratch = stat & 0x0f;
1951	stat &= ~0x0f;
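	/* the low four bits of irqstat0 are per-endpoint data interrupts
	 * (ep0, ep-a, ep-b, ep-c); service each one that is set
	 */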
1952	for (num = 0; scratch; num++) {
1953		u8 t;
1954
1955		/* does this endpoint's FIFO and queue need tending? */
1956		t = 1 << num;
1957		if ((scratch & t) == 0)
1958			continue;
1959		scratch ^= t;
1960
1961		ep = &dev->ep[num];
1962		net2272_handle_ep(ep);
1963	}
1964
1965	/* some interrupts we can just ignore */
1966	stat &= ~(1 << SOF_INTERRUPT);
1967
1968	if (stat)
1969		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1970}
1971
1972static void
1973net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1974{
1975	u8 tmp, mask;
1976
1977	/* after disconnect there's nothing else to do! */
1978	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1979	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1980
1981	if (stat & tmp) {
1982		bool	reset = false;
1983		bool	disconnect = false;
1984
1985		/*
1986		 * Ignore disconnects and resets if the speed hasn't been set.
1987		 * VBUS can bounce and there's always an initial reset.
1988		 */
1989		net2272_write(dev, IRQSTAT1, tmp);
1990		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1991			if ((stat & (1 << VBUS_INTERRUPT)) &&
1992					(net2272_read(dev, USBCTL1) &
1993						(1 << VBUS_PIN)) == 0) {
1994				disconnect = true;
1995				dev_dbg(dev->dev, "disconnect %s\n",
1996					dev->driver->driver.name);
1997			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1998					(net2272_read(dev, USBCTL1) & mask)
1999						== 0) {
2000				reset = true;
2001				dev_dbg(dev->dev, "reset %s\n",
2002					dev->driver->driver.name);
2003			}
2004
2005			if (disconnect || reset) {
2006				stop_activity(dev, dev->driver);
2007				net2272_ep0_start(dev);
2008				spin_unlock(&dev->lock);
2009				if (reset)
2010					usb_gadget_udc_reset
2011						(&dev->gadget, dev->driver);
2012				else
2013					(dev->driver->disconnect)
2014						(&dev->gadget);
2015				spin_lock(&dev->lock);
2016				return;
2017			}
2018		}
2019		stat &= ~tmp;
2020
2021		if (!stat)
2022			return;
2023	}
2024
2025	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2026	if (stat & tmp) {
2027		net2272_write(dev, IRQSTAT1, tmp);
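		/* the change interrupt fires on both suspend and resume;
		 * SUSPEND_REQUEST_INTERRUPT tells us which transition this is
		 */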
2028		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2029			if (dev->driver->suspend)
2030				dev->driver->suspend(&dev->gadget);
2031			if (!enable_suspend) {
2032				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2033				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2034			}
2035		} else {
2036			if (dev->driver->resume)
2037				dev->driver->resume(&dev->gadget);
2038		}
2039		stat &= ~tmp;
2040	}
2041
2042	/* clear any other status/irqs */
2043	if (stat)
2044		net2272_write(dev, IRQSTAT1, stat);
2045
2046	/* some status we can just ignore */
2047	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2048			| (1 << SUSPEND_REQUEST_INTERRUPT)
2049			| (1 << RESUME_INTERRUPT));
2050	if (stat)
2051		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2054}
2055
2056static irqreturn_t net2272_irq(int irq, void *_dev)
2057{
2058	struct net2272 *dev = _dev;
2059#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2060	u32 intcsr;
2061#endif
2062#if defined(PLX_PCI_RDK)
2063	u8 dmareq;
2064#endif
2065	spin_lock(&dev->lock);
2066#if defined(PLX_PCI_RDK)
2067	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2068
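	/* the NET2272 interrupts the host CPU through the PLX 9054 local
	 * interrupt input; mask the PCI interrupt while servicing it, then
	 * re-enable it
	 */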
2069	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2070		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2071				dev->rdk1.plx9054_base_addr + INTCSR);
2072		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2073		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2074		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2075		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2076			dev->rdk1.plx9054_base_addr + INTCSR);
2077	}
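	/* PLX 9054 DMA channel 0 completion: clear the channel interrupt and
	 * let DMAREQ tell us which endpoint owned the transfer
	 */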
2078	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2079		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2080				dev->rdk1.plx9054_base_addr + DMACSR0);
2081
2082		dmareq = net2272_read(dev, DMAREQ);
2083		if (dmareq & 0x01)
2084			net2272_handle_dma(&dev->ep[2]);
2085		else
2086			net2272_handle_dma(&dev->ep[1]);
2087	}
2088#endif
2089#if defined(PLX_PCI_RDK2)
2090	/* see if PCI int for us by checking irqstat */
2091	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2092	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2093		spin_unlock(&dev->lock);
2094		return IRQ_NONE;
2095	}
2096	/* check dma interrupts */
2097#endif
2098	/* Platform/device interrupt handler */
2099#if !defined(PLX_PCI_RDK)
2100	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2101	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2102#endif
2103	spin_unlock(&dev->lock);
2104
2105	return IRQ_HANDLED;
2106}
2107
2108static int net2272_present(struct net2272 *dev)
2109{
2110	/*
2111	 * Quick test to see if CPU can communicate properly with the NET2272.
2112	 * Verifies connection using writes and reads to write/read and
2113	 * read-only registers.
2114	 *
2115	 * This routine is strongly recommended, especially during early bring-up
2116	 * of new hardware; for designs that do not run Power-On Self Tests
2117	 * (POST) it may be discarded (or perhaps minimized).
2118	 */
2119	unsigned int ii;
2120	u8 val, refval;
2121
2122	/* Verify the NET2272 SCRATCH register can be written and read back */
2123	refval = net2272_read(dev, SCRATCH);
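	/* stepping by 7 exercises a spread of bit patterns without writing
	 * all 256 possible values
	 */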
2124	for (ii = 0; ii < 0x100; ii += 7) {
2125		net2272_write(dev, SCRATCH, ii);
2126		val = net2272_read(dev, SCRATCH);
2127		if (val != ii) {
2128			dev_dbg(dev->dev,
2129				"%s: write/read SCRATCH register test failed: "
2130				"wrote:0x%2.2x, read:0x%2.2x\n",
2131				__func__, ii, val);
2132			return -EINVAL;
2133		}
2134	}
2135	/* To be nice, we write the original SCRATCH value back: */
2136	net2272_write(dev, SCRATCH, refval);
2137
2138	/* Verify NET2272 CHIPREV register is read-only: */
2139	refval = net2272_read(dev, CHIPREV_2272);
2140	for (ii = 0; ii < 0x100; ii += 7) {
2141		net2272_write(dev, CHIPREV_2272, ii);
2142		val = net2272_read(dev, CHIPREV_2272);
2143		if (val != refval) {
2144			dev_dbg(dev->dev,
2145				"%s: write/read CHIPREV register test failed: "
2146				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2147				__func__, ii, val, refval);
2148			return -EINVAL;
2149		}
2150	}
2151
2152	/*
2153	 * Verify NET2272's "NET2270 legacy revision" register
2154	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2155	 *    register should read the same value, regardless of the NET2272
2156	 *    silicon revision.  The legacy register exists so that firmware
2157	 *    written for the NET2270 can also run on the NET2272.
2158	 */
2159	val = net2272_read(dev, CHIPREV_LEGACY);
2160	if (val != NET2270_LEGACY_REV) {
2161		/*
2162		 * Unexpected legacy revision value
2163		 * - Perhaps the chip is a NET2270?
2164		 */
2165		dev_dbg(dev->dev,
2166			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2167			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2168			__func__, NET2270_LEGACY_REV, val);
2169		return -EINVAL;
2170	}
2171
2172	/*
2173	 * Verify NET2272 silicon revision
2174	 *  - This revision register is appropriate for the silicon version
2175	 *    of the NET2272
2176	 */
2177	val = net2272_read(dev, CHIPREV_2272);
2178	switch (val) {
2179	case CHIPREV_NET2272_R1:
2180		/*
2181		 * NET2272 Rev 1 has DMA related errata:
2182		 *  - Newer silicon (Rev 1A or better) required
2183		 */
2184		dev_dbg(dev->dev,
2185			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2186			__func__);
2187		break;
2188	case CHIPREV_NET2272_R1A:
2189		break;
2190	default:
2191		/* NET2272 silicon version *may* not work with this firmware */
2192		dev_dbg(dev->dev,
2193			"%s: unexpected silicon revision register value: "
2194			" CHIPREV_2272: 0x%2.2x\n",
2195			__func__, val);
2196		/*
2197		 * Return Success, even though the chip rev is not an expected value
2198		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2199		 *  - Often, new silicon is perfectly compatible
2200		 */
2201	}
2202
2203	/* Success: NET2272 checks out OK */
2204	return 0;
2205}
2206
2207static void
2208net2272_gadget_release(struct device *_dev)
2209{
2210	struct net2272 *dev = dev_get_drvdata(_dev);
2211	kfree(dev);
2212}
2213
2214/*---------------------------------------------------------------------------*/
2215
2216static void
2217net2272_remove(struct net2272 *dev)
2218{
2219	usb_del_gadget_udc(&dev->gadget);
2220	free_irq(dev->irq, dev);
2221	iounmap(dev->base_addr);
2222	device_remove_file(dev->dev, &dev_attr_registers);
2223
2224	dev_info(dev->dev, "unbind\n");
2225}
2226
2227static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2228{
2229	struct net2272 *ret;
2230
2231	if (!irq) {
2232		dev_dbg(dev, "No IRQ!\n");
2233		return ERR_PTR(-ENODEV);
2234	}
2235
2236	/* alloc, and start init */
2237	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2238	if (!ret)
2239		return ERR_PTR(-ENOMEM);
2240
2241	spin_lock_init(&ret->lock);
2242	ret->irq = irq;
2243	ret->dev = dev;
2244	ret->gadget.ops = &net2272_ops;
2245	ret->gadget.max_speed = USB_SPEED_HIGH;
2246
2247	/* the "gadget" abstracts/virtualizes the controller */
2248	ret->gadget.name = driver_name;
2249
2250	return ret;
2251}
2252
2253static int
2254net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2255{
2256	int ret;
2257
2258	/* See if there... */
2259	if (net2272_present(dev)) {
2260		dev_warn(dev->dev, "2272 not found!\n");
2261		ret = -ENODEV;
2262		goto err;
2263	}
2264
2265	net2272_usb_reset(dev);
2266	net2272_usb_reinit(dev);
2267
2268	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2269	if (ret) {
2270		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2271		goto err;
2272	}
2273
2274	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2275
2276	/* done */
2277	dev_info(dev->dev, "%s\n", driver_desc);
2278	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2279		dev->irq, dev->base_addr, dev->chiprev,
2280		dma_mode_string());
2281	dev_info(dev->dev, "version: %s\n", driver_vers);
2282
2283	ret = device_create_file(dev->dev, &dev_attr_registers);
2284	if (ret)
2285		goto err_irq;
2286
2287	ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2288			net2272_gadget_release);
2289	if (ret)
2290		goto err_add_udc;
2291
2292	return 0;
2293
2294err_add_udc:
2295	device_remove_file(dev->dev, &dev_attr_registers);
2296 err_irq:
2297	free_irq(dev->irq, dev);
2298 err:
2299	return ret;
2300}
2301
2302#ifdef CONFIG_PCI
2303
2304/*
2305 * wrap this driver around the specified device, but
2306 * don't respond over USB until a gadget driver binds to us
2307 */
2308
2309static int
2310net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2311{
2312	unsigned long resource, len, tmp;
2313	void __iomem *mem_mapped_addr[4];
2314	int ret, i;
2315
2316	/*
2317	 * BAR 0 holds PLX 9054 config registers
2318	 * BAR 1 is i/o memory; unused here
2319	 * BAR 2 holds EPLD config registers
2320	 * BAR 3 holds NET2272 registers
2321	 */
2322
2323	/* Find and map all address spaces */
2324	for (i = 0; i < 4; ++i) {
2325		if (i == 1)
2326			continue;	/* BAR1 unused */
2327
2328		resource = pci_resource_start(pdev, i);
2329		len = pci_resource_len(pdev, i);
2330
2331		if (!request_mem_region(resource, len, driver_name)) {
2332			dev_dbg(dev->dev, "controller already in use\n");
2333			ret = -EBUSY;
2334			goto err;
2335		}
2336
2337		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2338		if (mem_mapped_addr[i] == NULL) {
2339			release_mem_region(resource, len);
2340			dev_dbg(dev->dev, "can't map memory\n");
2341			ret = -EFAULT;
2342			goto err;
2343		}
2344	}
2345
2346	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2347	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2348	dev->base_addr = mem_mapped_addr[3];
2349
2350	/* Set PLX 9054 bus width (16 bits) */
2351	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2352	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2353			dev->rdk1.plx9054_base_addr + LBRD1);
2354
2355	/* Enable PLX 9054 Interrupts */
2356	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2357			(1 << PCI_INTERRUPT_ENABLE) |
2358			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2359			dev->rdk1.plx9054_base_addr + INTCSR);
2360
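	/* clear any pending interrupt on PLX DMA channel 0 and leave the
	 * channel disabled
	 */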
2361	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2362			dev->rdk1.plx9054_base_addr + DMACSR0);
2363
2364	/* reset */
2365	writeb((1 << EPLD_DMA_ENABLE) |
2366		(1 << DMA_CTL_DACK) |
2367		(1 << DMA_TIMEOUT_ENABLE) |
2368		(1 << USER) |
2369		(0 << MPX_MODE) |
2370		(1 << BUSWIDTH) |
2371		(1 << NET2272_RESET),
2372		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2373
2374	mb();
2375	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2376		~(1 << NET2272_RESET),
2377		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2378	udelay(200);
2379
2380	return 0;
2381
2382 err:
2383	while (--i >= 0) {
2384		iounmap(mem_mapped_addr[i]);
2385		release_mem_region(pci_resource_start(pdev, i),
2386			pci_resource_len(pdev, i));
2387	}
2388
2389	return ret;
2390}
2391
2392static int
2393net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2394{
2395	unsigned long resource, len;
2396	void __iomem *mem_mapped_addr[2];
2397	int ret, i;
2398
2399	/*
2400	 * BAR 0 holds FPGA config registers
2401	 * BAR 1 holds NET2272 registers
2402	 */
2403
2404	/* Find and map all address spaces; BAR2-3 are unused on the RDK2 */
2405	for (i = 0; i < 2; ++i) {
2406		resource = pci_resource_start(pdev, i);
2407		len = pci_resource_len(pdev, i);
2408
2409		if (!request_mem_region(resource, len, driver_name)) {
2410			dev_dbg(dev->dev, "controller already in use\n");
2411			ret = -EBUSY;
2412			goto err;
2413		}
2414
2415		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2416		if (mem_mapped_addr[i] == NULL) {
2417			release_mem_region(resource, len);
2418			dev_dbg(dev->dev, "can't map memory\n");
2419			ret = -EFAULT;
2420			goto err;
2421		}
2422	}
2423
2424	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2425	dev->base_addr = mem_mapped_addr[1];
2426
2427	mb();
2428	/* Set 2272 bus width (16 bits) and reset */
2429	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2430	udelay(200);
2431	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2432	/* Print fpga version number */
2433	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2434		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2435	/* Enable FPGA Interrupts */
2436	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2437
2438	return 0;
2439
2440 err:
2441	while (--i >= 0) {
2442		iounmap(mem_mapped_addr[i]);
2443		release_mem_region(pci_resource_start(pdev, i),
2444			pci_resource_len(pdev, i));
2445	}
2446
2447	return ret;
2448}
2449
2450static int
2451net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2452{
2453	struct net2272 *dev;
2454	int ret;
2455
2456	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2457	if (IS_ERR(dev))
2458		return PTR_ERR(dev);
2459	dev->dev_id = pdev->device;
2460
2461	if (pci_enable_device(pdev) < 0) {
2462		ret = -ENODEV;
2463		goto err_free;
2464	}
2465
2466	pci_set_master(pdev);
2467
2468	switch (pdev->device) {
2469	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2470	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2471	default: BUG();
2472	}
2473	if (ret)
2474		goto err_pci;
2475
2476	ret = net2272_probe_fin(dev, 0);
2477	if (ret)
2478		goto err_pci;
2479
2480	pci_set_drvdata(pdev, dev);
2481
2482	return 0;
2483
2484 err_pci:
2485	pci_disable_device(pdev);
2486 err_free:
2487	kfree(dev);
2488
2489	return ret;
2490}
2491
2492static void
2493net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2494{
2495	int i;
2496
2497	/* disable PLX 9054 interrupts */
2498	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2499		~(1 << PCI_INTERRUPT_ENABLE),
2500		dev->rdk1.plx9054_base_addr + INTCSR);
2501
2502	/* clean up resources allocated during probe() */
2503	iounmap(dev->rdk1.plx9054_base_addr);
2504	iounmap(dev->rdk1.epld_base_addr);
2505
2506	for (i = 0; i < 4; ++i) {
2507		if (i == 1)
2508			continue;	/* BAR1 unused */
2509		release_mem_region(pci_resource_start(pdev, i),
2510			pci_resource_len(pdev, i));
2511	}
2512}
2513
2514static void
2515net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2516{
2517	int i;
2518
2519	/* disable fpga interrupts
2520	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2521			~(1 << PCI_INTERRUPT_ENABLE),
2522			dev->rdk1.plx9054_base_addr + INTCSR);
2523	*/
2524
2525	/* clean up resources allocated during probe() */
2526	iounmap(dev->rdk2.fpga_base_addr);
2527
2528	for (i = 0; i < 2; ++i)
2529		release_mem_region(pci_resource_start(pdev, i),
2530			pci_resource_len(pdev, i));
2531}
2532
2533static void
2534net2272_pci_remove(struct pci_dev *pdev)
2535{
2536	struct net2272 *dev = pci_get_drvdata(pdev);
2537
2538	net2272_remove(dev);
2539
2540	switch (pdev->device) {
2541	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2542	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2543	default: BUG();
2544	}
2545
2546	pci_disable_device(pdev);
2547
2548	kfree(dev);
2549}
2550
2551/* Table of matching PCI IDs */
2552static struct pci_device_id pci_ids[] = {
2553	{	/* RDK 1 card */
2554		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2555		.class_mask  = 0,
2556		.vendor      = PCI_VENDOR_ID_PLX,
2557		.device      = PCI_DEVICE_ID_RDK1,
2558		.subvendor   = PCI_ANY_ID,
2559		.subdevice   = PCI_ANY_ID,
2560	},
2561	{	/* RDK 2 card */
2562		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2563		.class_mask  = 0,
2564		.vendor      = PCI_VENDOR_ID_PLX,
2565		.device      = PCI_DEVICE_ID_RDK2,
2566		.subvendor   = PCI_ANY_ID,
2567		.subdevice   = PCI_ANY_ID,
2568	},
2569	{ }
2570};
2571MODULE_DEVICE_TABLE(pci, pci_ids);
2572
2573static struct pci_driver net2272_pci_driver = {
2574	.name     = driver_name,
2575	.id_table = pci_ids,
2576
2577	.probe    = net2272_pci_probe,
2578	.remove   = net2272_pci_remove,
2579};
2580
2581static int net2272_pci_register(void)
2582{
2583	return pci_register_driver(&net2272_pci_driver);
2584}
2585
2586static void net2272_pci_unregister(void)
2587{
2588	pci_unregister_driver(&net2272_pci_driver);
2589}
2590
2591#else
2592static inline int net2272_pci_register(void) { return 0; }
2593static inline void net2272_pci_unregister(void) { }
2594#endif
2595
2596/*---------------------------------------------------------------------------*/
2597
2598static int
2599net2272_plat_probe(struct platform_device *pdev)
2600{
2601	struct net2272 *dev;
2602	int ret;
2603	unsigned int irqflags;
2604	resource_size_t base, len;
2605	struct resource *iomem, *iomem_bus, *irq_res;
2606
2607	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2608	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2609	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2610	if (!irq_res || !iomem) {
2611		dev_err(&pdev->dev, "must provide irq/base addr\n");
2612		return -EINVAL;
2613	}
2614
2615	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2616	if (IS_ERR(dev))
2617		return PTR_ERR(dev);
2618
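	/* translate the IRQ resource's trigger flags into the IRQF_TRIGGER_*
	 * flags handed to request_irq()
	 */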
2619	irqflags = 0;
2620	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2621		irqflags |= IRQF_TRIGGER_RISING;
2622	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2623		irqflags |= IRQF_TRIGGER_FALLING;
2624	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2625		irqflags |= IRQF_TRIGGER_HIGH;
2626	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2627		irqflags |= IRQF_TRIGGER_LOW;
2628
2629	base = iomem->start;
2630	len = resource_size(iomem);
2631	if (iomem_bus)
2632		dev->base_shift = iomem_bus->start;
2633
2634	if (!request_mem_region(base, len, driver_name)) {
2635		dev_dbg(dev->dev, "can't request memory region\n");
2636		ret = -EBUSY;
2637		goto err;
2638	}
2639	dev->base_addr = ioremap_nocache(base, len);
2640	if (!dev->base_addr) {
2641		dev_dbg(dev->dev, "can't map memory\n");
2642		ret = -EFAULT;
2643		goto err_req;
2644	}
2645
2646	ret = net2272_probe_fin(dev, irqflags);
2647	if (ret)
2648		goto err_io;
2649
2650	platform_set_drvdata(pdev, dev);
2651	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2652		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2653
2654	return 0;
2655
2656 err_io:
2657	iounmap(dev->base_addr);
2658 err_req:
2659	release_mem_region(base, len);
2660 err:
2661	return ret;
2662}
2663
2664static int
2665net2272_plat_remove(struct platform_device *pdev)
2666{
2667	struct net2272 *dev = platform_get_drvdata(pdev);
2668
2669	net2272_remove(dev);
2670
2671	release_mem_region(pdev->resource[0].start,
2672		resource_size(&pdev->resource[0]));
2673
2674	kfree(dev);
2675
2676	return 0;
2677}
2678
2679static struct platform_driver net2272_plat_driver = {
2680	.probe   = net2272_plat_probe,
2681	.remove  = net2272_plat_remove,
2682	.driver  = {
2683		.name  = driver_name,
2684	},
2685	/* FIXME .suspend, .resume */
2686};
2687MODULE_ALIAS("platform:net2272");
2688
2689static int __init net2272_init(void)
2690{
2691	int ret;
2692
2693	ret = net2272_pci_register();
2694	if (ret)
2695		return ret;
2696	ret = platform_driver_register(&net2272_plat_driver);
2697	if (ret)
2698		goto err_pci;
2699	return ret;
2700
2701err_pci:
2702	net2272_pci_unregister();
2703	return ret;
2704}
2705module_init(net2272_init);
2706
2707static void __exit net2272_cleanup(void)
2708{
2709	net2272_pci_unregister();
2710	platform_driver_unregister(&net2272_plat_driver);
2711}
2712module_exit(net2272_cleanup);
2713
2714MODULE_DESCRIPTION(DRIVER_DESC);
2715MODULE_AUTHOR("PLX Technology, Inc.");
2716MODULE_LICENSE("GPL");
2717