/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>

#include "mv_udc.h"

#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

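/*
 * ep0 has no fixed direction: it follows the current control-transfer
 * phase, which the driver tracks in udc->ep0_dir.
 */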
#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

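/*
 * Busy-wait bookkeeping: LOOPS() converts a microsecond timeout into the
 * number of polling iterations when each iteration delays LOOPS_USEC.
 */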
#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};

static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}

/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32	epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}

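/*
 * Walk the dTD chain of a request on endpoint @index: returns 1 if a dTD
 * is still active, a negative errno on controller-reported errors, and 0
 * on success with req->req.actual updated.
 */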
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD has remaining data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}

/*
 * done() - retire a request; caller holds udc->lock with irqs blocked
 * @status : request status to be set, only works when
 * request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = ep->udc;
	/* Remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

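/*
 * Add a request's dTD chain to the endpoint. If the queue is non-empty,
 * the chain is appended to the last dTD using the ATDTW tripwire to
 * detect races with the controller; otherwise the dQH is pointed at the
 * chain and the endpoint is primed.
 */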
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When the hardware sees a hazard, it will
			 * clear the bit; otherwise it remains set and we can
			 * proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}

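/*
 * Build one dTD for up to EP_MAX_LENGTH_TRANSFER bytes of the request,
 * filling the five 4 KiB buffer page pointers and advancing
 * req->req.actual; *is_last tells the caller whether more dTDs follow.
 */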
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no __GFP_HIGHMEM is set,
	 * or we cannot use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}

/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	struct mv_udc *udc;
	dma_addr_t dma;

	udc = req->ep->udc;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}

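/*
 * Configure an endpoint from its descriptor: program the dQH max packet
 * length, zlt/ios/mult fields and the ENDPTCTRL type/enable bits.
 */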
static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	unsigned char zlt = 0, ios = 0, mult = 0;
	unsigned long flags;

	/* validate the arguments before dereferencing _ep */
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	zlt = 1;

	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}
	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		zlt = 1;
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		/* fall through */
	case USB_ENDPOINT_XFER_INT:
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bit 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}

static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);

	if (_req)
		kfree(req);
}

static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x, "
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}

/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int retval;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->ep.desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
	if (retval)
		return retval;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
			goto err_unmap_dma;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
		retval = -ENOMEM;
		goto err_unmap_dma;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_unmap_dma:
	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));

	return retval;
}

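/*
 * Point the endpoint's dQH directly at the first dTD of @req and prime
 * the endpoint; called with udc->lock held.
 */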
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}

/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}

static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}

static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!_ep || !ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}
	udc = ep->udc;

	if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * An attempt to halt an IN ep will fail if any transfer requests
	 * are still queued
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static const struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

static void udc_clock_enable(struct mv_udc *udc)
{
	clk_prepare_enable(udc->clk);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}

static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Clear the Run bit in the command register to stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}

static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout waiting for RESET to complete\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}

static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}

static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16	retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc[0]);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}

static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		if (!udc->active)
			goto out;

		/* stop all the transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};

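/*
 * Initialize the endpoint array: eps[0] serves ep0; for i >= 2, eps[i]
 * maps to endpoint i/2, with even entries OUT and odd entries IN.
 */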
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep	*ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}

/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}

static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report reset; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			udc->driver = NULL;
			return retval;
		}
	}

	/* When booting with the cable attached, no vbus irq will occur */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}

static int mv_udc_stop(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}

static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc;
	unsigned long flags;

	udc = mvep->udc;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}

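/*
 * Queue the ep0 status stage (or GET_STATUS data): an empty packet when
 * @empty is true, otherwise the two-byte @status value.
 */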
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (!empty) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else {	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}

static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode (%d) is not supported\n", index);
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}

static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num >= udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num >= udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

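/*
 * Handle a SETUP packet: standard device requests are processed here;
 * everything else is delegated to the gadget driver's setup() callback.
 */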
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}

/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}

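/*
 * Copy an 8-byte SETUP packet out of the dQH. The setup tripwire bit is
 * cleared by the controller if a new packet arrives during the copy, in
 * which case the copy is retried.
 */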
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists if a new setup packet arrives during the copy */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}

static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/*
	 * Don't clear the endpoint setup status register here;
	 * it is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}

static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The device address occupies bits 25-31; reset it to zero */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		gadget_reset(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc[0]));

		/* re-initialize: reset the controller */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}

static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver) {
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}

static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and initializing the PHY may take too long, defer it */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}
2051
2052static void mv_udc_vbus_work(struct work_struct *work)
2053{
2054	struct mv_udc *udc;
2055	unsigned int vbus;
2056
2057	udc = container_of(work, struct mv_udc, vbus_work);
2058	if (!udc->pdata->vbus)
2059		return;
2060
2061	vbus = udc->pdata->vbus->poll();
2062	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2063
2064	if (vbus == VBUS_HIGH)
2065		mv_udc_vbus_session(&udc->gadget, 1);
2066	else if (vbus == VBUS_LOW)
2067		mv_udc_vbus_session(&udc->gadget, 0);
2068}
2069
2070/* release device structure */
2071static void gadget_release(struct device *_dev)
2072{
2073	struct mv_udc *udc;
2074
2075	udc = dev_get_drvdata(_dev);
2076
2077	complete(udc->done);
2078}
2079
2080static int mv_udc_remove(struct platform_device *pdev)
2081{
2082	struct mv_udc *udc;
2083
2084	udc = platform_get_drvdata(pdev);
2085
2086	usb_del_gadget_udc(&udc->gadget);
2087
2088	if (udc->qwork) {
2089		flush_workqueue(udc->qwork);
2090		destroy_workqueue(udc->qwork);
2091	}
2092
2093	/* free memory allocated in probe */
2094	if (udc->dtd_pool)
2095		dma_pool_destroy(udc->dtd_pool);
2096
2097	if (udc->ep_dqh)
2098		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2099			udc->ep_dqh, udc->ep_dqh_dma);
2100
2101	mv_udc_disable(udc);
2102
2103	/* free dev, wait for the release() finished */
2104	wait_for_completion(udc->done);
2105
2106	return 0;
2107}
2108
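/*
 * Probe sequence: map the capability and PHY register windows, enable
 * clocks, allocate the dQH area and dTD pool, set up endpoint and ep0
 * status-request state, request the controller (and optional VBUS)
 * interrupts, then register the gadget with the UDC core.
 */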
static int mv_udc_probe(struct platform_device *pdev)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct mv_udc *udc;
	int retval = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (udc == NULL)
		return -ENOMEM;

	udc->done = &release_done;
	udc->pdata = dev_get_platdata(&pdev->dev);
	spin_lock_init(&udc->lock);

	udc->dev = pdev;

	if (pdata->mode == MV_USB_MODE_OTG) {
		udc->transceiver = devm_usb_get_phy(&pdev->dev,
					USB_PHY_TYPE_USB2);
		if (IS_ERR(udc->transceiver)) {
			retval = PTR_ERR(udc->transceiver);

			if (retval == -ENXIO)
				return retval;

			udc->transceiver = NULL;
			return -EPROBE_DEFER;
		}
	}

	/* the udc has only one sysclk */
	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		return -EBUSY;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
		return -EBUSY;
	}

	/* we will access controller registers, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		return retval;

	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the udc here first.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	/* one rx and one tx dQH per endpoint */
	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&pdev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&pdev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (udc->eps == NULL) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
					GFP_KERNEL);
	if (!udc->status_req) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_destroy_dma;
	}
	udc->irq = r->start;
	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_destroy_dma;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	udc->gadget.name = driver_name;		/* gadget name */

	eps_init(udc);

	/* VBUS detect: we can disable/enable the clock on demand */
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = devm_request_threaded_irq(&pdev->dev,
				pdata->vbus->irq, NULL,
				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&pdev->dev,
				"cannot request irq for VBUS, "
				"disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&pdev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_destroy_dma;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable the clk and phy.
	 * If not, VBUS detection is not supported and we have to keep
	 * vbus active all the time for the controller to work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
			gadget_release);
	if (retval)
		goto err_create_workqueue;

	platform_set_drvdata(pdev, udc);
	dev_info(&pdev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_create_workqueue:
	if (udc->qwork)
		destroy_workqueue(udc->qwork);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);

	return retval;
}

#ifdef CONFIG_PM
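/*
 * System suspend: refuse to suspend while a cable is attached. If
 * clock gating is in use, the controller is already idle once the
 * cable is unplugged, so there is nothing more to do here.
 */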
static int mv_udc_suspend(struct device *dev)
{
	struct mv_udc *udc;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (udc->pdata->vbus && udc->pdata->vbus->poll &&
		udc->pdata->vbus->poll() == VBUS_HIGH) {
		dev_info(&udc->dev->dev, "USB cable is connected!\n");
		return -EAGAIN;
	}

	/*
	 * The udc can suspend only when the cable is unplugged, so the
	 * clock_gating == 1 case needs no extra work here.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		mv_udc_disable_internal(udc);
	}

	return 0;
}

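/*
 * System resume: re-enable clocks/phy and, if a gadget driver is bound
 * and soft-connect is on, bring the controller back up from scratch.
 */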
static int mv_udc_resume(struct device *dev)
{
	struct mv_udc *udc;
	int retval;

	udc = dev_get_drvdata(dev);

	/* if OTG is enabled, the following will be done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif

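/*
 * Shutdown hook: clear the controller mode bits in USBMODE (back to
 * idle) so the hardware is quiescent before the system goes down.
 */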
static void mv_udc_shutdown(struct platform_device *pdev)
{
	struct mv_udc *udc;
	u32 mode;

	udc = platform_get_drvdata(pdev);
	/* reset controller mode to IDLE */
	if (mv_udc_enable(udc))
		return;
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3;
	writel(mode, &udc->op_regs->usbmode);
	mv_udc_disable(udc);
}

static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= mv_udc_remove,
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};

module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");