/*
 * udc.c - ChipIdea UDC driver
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * Author: David Lopo
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/chipidea.h>

#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "debug.h"
#include "otg.h"
#include "otg_fsm.h"

/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns the bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}

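/**
 * ep_to_bit: calculates the register bit of an endpoint index
 * @ci: the controller
 * @n:  endpoint index (OUT endpoints first, then IN endpoints)
 *
 * This function returns the bit position inside the ENDPT* registers,
 * where OUT endpoints use the lower half and IN endpoints the upper half
 */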
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
{
	int fill = 16 - ci->hw_ep_max / 2;

	if (n >= ci->hw_ep_max / 2)
		n += fill;

	return n;
}

/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
	if (dma) {
		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(ci, OP_USBINTR, ~0,
			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
	} else {
		hw_write(ci, OP_USBINTR, ~0, 0);
	}
	return 0;
}

/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}

/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
	hw_ep_flush(ci, num, dir);
	hw_write(ci, OP_ENDPTCTRL + num,
		 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}

/**
 * hw_ep_enable: enables endpoint (execute without interruption)
 * @num:  endpoint number
 * @dir:  endpoint direction
 * @type: endpoint type
 *
 * This function returns an error code
 */
static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
	u32 mask, data;

	if (dir) {
		mask  = ENDPTCTRL_TXT;  /* type    */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_TXS;  /* unstall */
		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
		data |= ENDPTCTRL_TXR;
		mask |= ENDPTCTRL_TXE;  /* enable  */
		data |= ENDPTCTRL_TXE;
	} else {
		mask  = ENDPTCTRL_RXT;  /* type    */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_RXS;  /* unstall */
		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
		data |= ENDPTCTRL_RXR;
		mask |= ENDPTCTRL_RXE;  /* enable  */
		data |= ENDPTCTRL_RXE;
	}
	hw_write(ci, OP_ENDPTCTRL + num, mask, data);
	return 0;
}

/**
 * hw_ep_get_halt: return endpoint halt status
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns 1 if endpoint halted
 */
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
}

/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num:     endpoint number
 * @dir:     endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual but it doesn't work */
	return 0;
}

/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num:   endpoint number
 * @dir:   endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci_hw_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(ci, reg, mask_xs|mask_xr,
			  value ? mask_xs : mask_xr);
	} while (value != hw_ep_get_halt(ci, num, dir));

	return 0;
}

/**
 * hw_port_is_high_speed: test if port is high speed
 *
 * This function returns true if high speed port
 */
static int hw_port_is_high_speed(struct ci_hdrc *ci)
{
	return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
		hw_read(ci, OP_PORTSC, PORTSC_HSP);
}

/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 *                             interruption)
 * @n: endpoint number
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
{
	n = ep_to_bit(ci, n);
	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
}

/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 *                                without interruption)
 *
 * This function returns the active interrupts
 */
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
{
	u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);

	hw_write(ci, OP_USBSTS, ~0, reg);
	return reg;
}

/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}

/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}

/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << __ffs(DEVICEADDR_USBADR));
}

/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(struct ci_hdrc *ci)
{
	hw_usb_set_address(ci, 0);

	/* ESS flushes only at end?!? */
	hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);

	/* clear setup token semaphores */
	hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);

	/* clear complete status */
	hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);

	/* wait until all bits cleared */
	while (hw_read(ci, OP_ENDPTPRIME, ~0))
		udelay(10);             /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}

/******************************************************************************
 * UTIL block
 *****************************************************************************/

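/**
 * add_td_to_list: allocate and chain a transfer descriptor
 * @hwep:   endpoint
 * @hwreq:  request the td belongs to
 * @length: number of bytes covered by this td
 *
 * This function allocates a td from the endpoint's dma pool, fills in its
 * token and buffer pointers for @length bytes of the request and links it
 * at the end of the request's td list
 *
 * This function returns an error code
 */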
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
			  unsigned length)
{
	int i;
	u32 temp;
	struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
						  GFP_ATOMIC);

	if (node == NULL)
		return -ENOMEM;

	node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
				   &node->dma);
	if (node->ptr == NULL) {
		kfree(node);
		return -ENOMEM;
	}

	memset(node->ptr, 0, sizeof(struct ci_hw_td));
	node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
	node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
	node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		node->ptr->token |= mul << __ffs(TD_MULTO);
	}

	temp = (u32) (hwreq->req.dma + hwreq->req.actual);
	if (length) {
		node->ptr->page[0] = cpu_to_le32(temp);
		for (i = 1; i < TD_PAGE_COUNT; i++) {
			u32 page = temp + i * CI_HDRC_PAGE_SIZE;
			page &= ~TD_RESERVED_MASK;
			node->ptr->page[i] = cpu_to_le32(page);
		}
	}

	hwreq->req.actual += length;

	if (!list_empty(&hwreq->tds)) {
		/* get the last entry */
		lastnode = list_entry(hwreq->tds.prev,
				struct td_node, td);
		lastnode->ptr->next = cpu_to_le32(node->dma);
	}

	INIT_LIST_HEAD(&node->td);
	list_add_tail(&node->td, &hwreq->tds);

	return 0;
}

/**
 * _usb_addr: calculates endpoint address from direction & number
 * @ep:  endpoint
 */
static inline u8 _usb_addr(struct ci_hw_ep *ep)
{
	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}

/**
 * _hardware_enqueue: configures a request at hardware level
 * @hwep:  endpoint
 * @hwreq: request
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct ci_hdrc *ci = hwep->ci;
	int ret = 0;
	unsigned rest = hwreq->req.length;
	int pages = TD_PAGE_COUNT;
	struct td_node *firstnode, *lastnode;

	/* don't queue twice */
	if (hwreq->req.status == -EALREADY)
		return -EALREADY;

	hwreq->req.status = -EALREADY;

	ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
	if (ret)
		return ret;

	/*
	 * The first buffer might not be page aligned.
	 * In that case we have to span into one extra td.
	 */
	if (hwreq->req.dma % PAGE_SIZE)
		pages--;

	if (rest == 0)
		add_td_to_list(hwep, hwreq, 0);

	while (rest > 0) {
		unsigned count = min(hwreq->req.length - hwreq->req.actual,
					(unsigned)(pages * CI_HDRC_PAGE_SIZE));
		add_td_to_list(hwep, hwreq, count);
		rest -= count;
	}

	if (hwreq->req.zero && hwreq->req.length
	    && (hwreq->req.length % hwep->ep.maxpacket == 0))
		add_td_to_list(hwep, hwreq, 0);

	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);

	lastnode = list_entry(hwreq->tds.prev,
		struct td_node, td);

	lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
	if (!hwreq->req.no_interrupt)
		lastnode->ptr->token |= cpu_to_le32(TD_IOC);
	wmb();

	hwreq->req.actual = 0;
	if (!list_empty(&hwep->qh.queue)) {
		struct ci_hw_req *hwreqprev;
		int n = hw_ep_bit(hwep->num, hwep->dir);
		int tmp_stat;
		struct td_node *prevlastnode;
		u32 next = firstnode->dma & TD_ADDR_MASK;

		hwreqprev = list_entry(hwep->qh.queue.prev,
				struct ci_hw_req, queue);
		prevlastnode = list_entry(hwreqprev->tds.prev,
				struct td_node, td);

		prevlastnode->ptr->next = cpu_to_le32(next);
		wmb();
		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/*  QH configuration */
	hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));

	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
	}

	wmb();   /* synchronize before ep prime */

	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}

/*
 * free_pending_td: remove a pending request for the endpoint
 * @hwep: endpoint
 */
static void free_pending_td(struct ci_hw_ep *hwep)
{
	struct td_node *pending = hwep->pending_td;

	dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
	hwep->pending_td = NULL;
	kfree(pending);
}

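/**
 * reprime_dtd: reload the queue head with a td and prime the endpoint again
 * @ci:   the controller
 * @hwep: endpoint
 * @node: td to be executed
 *
 * Used by _hardware_dequeue() when a td is still active but the endpoint
 * is no longer primed
 *
 * This function returns an error code
 */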
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
					   struct td_node *node)
{
	hwep->qh.ptr->td.next = node->dma;
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));

	/* Synchronize before ep prime */
	wmb();

	return hw_ep_prime(ci, hwep->num, hwep->dir,
				hwep->type == USB_ENDPOINT_XFER_CONTROL);
}

/**
 * _hardware_dequeue: handles a request at hardware level
 * @hwep:  endpoint
 * @hwreq: request
 *
 * This function returns an error code
 */
static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	u32 tmptoken;
	struct td_node *node, *tmpnode;
	unsigned remaining_length;
	unsigned actual = hwreq->req.length;
	struct ci_hdrc *ci = hwep->ci;

	if (hwreq->req.status != -EALREADY)
		return -EINVAL;

	hwreq->req.status = 0;

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		tmptoken = le32_to_cpu(node->ptr->token);
		if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
			int n = hw_ep_bit(hwep->num, hwep->dir);

			if (ci->rev == CI_REVISION_24)
				if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
					reprime_dtd(ci, hwep, node);
			hwreq->req.status = -EALREADY;
			return -EBUSY;
		}

		remaining_length = (tmptoken & TD_TOTAL_BYTES);
		remaining_length >>= __ffs(TD_TOTAL_BYTES);
		actual -= remaining_length;

		hwreq->req.status = tmptoken & TD_STATUS;
		if ((TD_STATUS_HALTED & hwreq->req.status)) {
			hwreq->req.status = -EPIPE;
			break;
		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
			hwreq->req.status = -EPROTO;
			break;
		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
			hwreq->req.status = -EILSEQ;
			break;
		}

		if (remaining_length) {
			if (hwep->dir) {
				hwreq->req.status = -EPROTO;
				break;
			}
		}
		/*
		 * As the hardware could still address the freed td,
		 * which would render the udc unusable, the cleanup of
		 * the td has to be delayed by one.
		 */
		if (hwep->pending_td)
			free_pending_td(hwep);

		hwep->pending_td = node;
		list_del_init(&node->td);
	}

	usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);

	hwreq->req.actual += actual;

	if (hwreq->req.status)
		return hwreq->req.status;

	return hwreq->req.actual;
}

/**
 * _ep_nuke: dequeues all endpoint requests
 * @hwep: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct td_node *node, *tmpnode;
	if (hwep == NULL)
		return -EINVAL;

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	while (!list_empty(&hwep->qh.queue)) {

		/* pop oldest request */
		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
						     struct ci_hw_req, queue);

		list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
			dma_pool_free(hwep->td_pool, node->ptr, node->dma);
			list_del_init(&node->td);
			node->ptr = NULL;
			kfree(node);
		}

		list_del_init(&hwreq->queue);
		hwreq->req.status = -ESHUTDOWN;

		if (hwreq->req.complete != NULL) {
			spin_unlock(hwep->lock);
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	if (hwep->pending_td)
		free_pending_td(hwep);

	return 0;
}

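/**
 * _ep_set_halt: stalls or unstalls an endpoint
 * @ep:             endpoint
 * @value:          true => stall, false => unstall
 * @check_transfer: if true, refuse to stall a non-control IN endpoint
 *                  that still has requests queued
 *
 * This function returns an error code
 */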
static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
		return -EOPNOTSUPP;

	spin_lock_irqsave(hwep->lock, flags);

	if (value && hwep->dir == TX && check_transfer &&
		!list_empty(&hwep->qh.queue) &&
			!usb_endpoint_xfer_control(hwep->ep.desc)) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EAGAIN;
	}

	direction = hwep->dir;
	do {
		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);

		if (!value)
			hwep->wedge = 0;

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}


/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&ci->lock, flags);
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->remote_wakeup = 0;
	ci->suspended = 0;
	spin_unlock_irqrestore(&ci->lock, flags);

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	usb_ep_fifo_flush(&ci->ep0out->ep);
	usb_ep_fifo_flush(&ci->ep0in->ep);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}

	if (ci->status != NULL) {
		usb_ep_free_request(&ci->ep0in->ep, ci->status);
		ci->status = NULL;
	}

	return 0;
}

/******************************************************************************
 * ISR block
 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @ci: UDC device
 *
 * This function resets the USB engine after a bus reset occurred
 */
static void isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	int retval;

	spin_unlock(&ci->lock);
	if (ci->gadget.speed != USB_SPEED_UNKNOWN)
		usb_gadget_udc_reset(&ci->gadget, ci->driver);

	retval = _gadget_stop_activity(&ci->gadget);
	if (retval)
		goto done;

	retval = hw_usb_reset(ci);
	if (retval)
		goto done;

	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
	if (ci->status == NULL)
		retval = -ENOMEM;

done:
	spin_lock(&ci->lock);

	if (retval)
		dev_err(ci->dev, "error: %i\n", retval);
}

/**
 * isr_get_status_complete: get_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock
 */
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (ep == NULL || req == NULL)
		return;

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

/**
 * _ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Caller must hold lock
 */
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct ci_hdrc *ci = hwep->ci;
	int retval = 0;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		if (req->length)
			hwep = (ci->ep0_dir == RX) ?
			       ci->ep0out : ci->ep0in;
		if (!list_empty(&hwep->qh.queue)) {
			_ep_nuke(hwep);
			retval = -EOVERFLOW;
			dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(hwep));
		}
	}

	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
	    hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
		return -EMSGSIZE;
	}

	/* first nuke then test link, e.g. previous status has not been sent */
	if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "request already in queue\n");
		return -EBUSY;
	}

	/* push request */
	hwreq->req.status = -EINPROGRESS;
	hwreq->req.actual = 0;

	retval = _hardware_enqueue(hwep, hwreq);

	if (retval == -EALREADY)
		retval = 0;
	if (!retval)
		list_add_tail(&hwreq->queue, &hwep->qh.queue);

	return retval;
}

/**
 * isr_get_status_response: get_status request response
 * @ci: ci struct
 * @setup: setup request packet
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci_hdrc *ci,
				   struct usb_ctrlrequest *setup)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_ep *hwep = ci->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	if (hwep == NULL || setup == NULL)
		return -EINVAL;

	spin_unlock(hwep->lock);
	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
	spin_lock(hwep->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length   = 2;
	req->buf      = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
			ci->gadget.is_selfpowered;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
	}
	/* else do nothing; reserved for future use */

	retval = _ep_queue(&hwep->ep, req, gfp_flags);
	if (retval)
		goto err_free_buf;

	return 0;

 err_free_buf:
	kfree(req->buf);
 err_free_req:
	spin_unlock(hwep->lock);
	usb_ep_free_request(&hwep->ep, req);
	spin_lock(hwep->lock);
	return retval;
}

/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hdrc *ci = req->context;
	unsigned long flags;

	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
		if (ci->address)
			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
	}

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}

/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @ci: ci struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci_hdrc *ci)
{
	int retval;
	struct ci_hw_ep *hwep;

	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
	ci->status->context = ci;
	ci->status->complete = isr_setup_status_complete;

	retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);

	return retval;
}

/**
 * isr_tr_complete_low: transaction complete low level handler
 * @hwep: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_req *hwreq, *hwreqtemp;
	struct ci_hw_ep *hweptemp = hwep;
	int retval = 0;

	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
			queue) {
		retval = _hardware_dequeue(hwep, hwreq);
		if (retval < 0)
			break;
		list_del_init(&hwreq->queue);
		if (hwreq->req.complete != NULL) {
			spin_unlock(hwep->lock);
			if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
					hwreq->req.length)
				hweptemp = hwep->ci->ep0in;
			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	if (retval == -EBUSY)
		retval = 0;

	return retval;
}

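/**
 * otg_a_alt_hnp_support: report that HNP is not supported on this port
 * @ci: the controller
 *
 * This function warns the user to connect to an alternate port and
 * completes the request with a status phase
 */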
static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
{
	dev_warn(&ci->gadget.dev,
		"connect the device to an alternate port if you want HNP\n");
	return isr_setup_status_phase(ci);
}

/**
 * isr_setup_packet_handler: setup packet handler
 * @ci: UDC descriptor
 *
 * This function handles setup packets
 */
static void isr_setup_packet_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
	struct usb_ctrlrequest req;
	int type, num, dir, err = -EINVAL;
	u8 tmode = 0;

	/*
	 * Flush data and handshake transactions of previous
	 * setup packet.
	 */
	_ep_nuke(ci->ep0out);
	_ep_nuke(ci->ep0in);

	/* read_setup_packet */
	do {
		hw_test_and_set_setup_guard(ci);
		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
	} while (!hw_test_and_clear_setup_guard(ci));

	type = req.bRequestType;

	ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

	switch (req.bRequest) {
	case USB_REQ_CLEAR_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = num & USB_ENDPOINT_DIR_MASK;
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (dir) /* TX */
				num += ci->hw_ep_max / 2;
			if (!ci->ci_hw_ep[num].wedge) {
				spin_unlock(&ci->lock);
				err = usb_ep_clear_halt(
					&ci->ci_hw_ep[num].ep);
				spin_lock(&ci->lock);
				if (err)
					break;
			}
			err = isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				le16_to_cpu(req.wValue) ==
				USB_DEVICE_REMOTE_WAKEUP) {
			if (req.wLength != 0)
				break;
			ci->remote_wakeup = 0;
			err = isr_setup_status_phase(ci);
		} else {
			goto delegate;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
		    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
		    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 2 ||
		    le16_to_cpu(req.wValue)  != 0)
			break;
		err = isr_get_status_response(ci, &req);
		break;
	case USB_REQ_SET_ADDRESS:
		if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 0 ||
		    le16_to_cpu(req.wIndex)  != 0)
			break;
		ci->address = (u8)le16_to_cpu(req.wValue);
		ci->setaddr = true;
		err = isr_setup_status_phase(ci);
		break;
	case USB_REQ_SET_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = num & USB_ENDPOINT_DIR_MASK;
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (dir) /* TX */
				num += ci->hw_ep_max / 2;

			spin_unlock(&ci->lock);
			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
			spin_lock(&ci->lock);
			if (!err)
				isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
			if (req.wLength != 0)
				break;
			switch (le16_to_cpu(req.wValue)) {
			case USB_DEVICE_REMOTE_WAKEUP:
				ci->remote_wakeup = 1;
				err = isr_setup_status_phase(ci);
				break;
			case USB_DEVICE_TEST_MODE:
				tmode = le16_to_cpu(req.wIndex) >> 8;
				switch (tmode) {
				case TEST_J:
				case TEST_K:
				case TEST_SE0_NAK:
				case TEST_PACKET:
				case TEST_FORCE_EN:
					ci->test_mode = tmode;
					err = isr_setup_status_phase(
							ci);
					break;
				default:
					break;
				}
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.b_hnp_enable = 1;
					err = isr_setup_status_phase(
							ci);
				}
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci))
					err = otg_a_alt_hnp_support(ci);
				break;
			default:
				goto delegate;
			}
		} else {
			goto delegate;
		}
		break;
	default:
delegate:
		if (req.wLength == 0)   /* no data phase */
			ci->ep0_dir = TX;

		spin_unlock(&ci->lock);
		err = ci->driver->setup(&ci->gadget, &req);
		spin_lock(&ci->lock);
		break;
	}

	if (err < 0) {
		spin_unlock(&ci->lock);
		if (_ep_set_halt(&hwep->ep, 1, false))
			dev_err(ci->dev, "error: _ep_set_halt\n");
		spin_lock(&ci->lock);
	}
}

/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @ci: UDC descriptor
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	unsigned i;
	int err;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];

		if (hwep->ep.desc == NULL)
			continue;   /* not configured */

		if (hw_test_and_clear_complete(ci, i)) {
			err = isr_tr_complete_low(hwep);
			if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(ci);
				if (err < 0) {
					spin_unlock(&ci->lock);
					if (_ep_set_halt(&hwep->ep, 1, false))
						dev_err(ci->dev,
						"error: _ep_set_halt\n");
					spin_lock(&ci->lock);
				}
			}
		}

		/* Only handle setup packet below */
		if (i == 0 &&
			hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
			isr_setup_packet_handler(ci);
	}
}

/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;
	u32 cap = 0;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);

	/* only internal SW should enable ctrl endpts */

	if (!list_empty(&hwep->qh.queue)) {
		dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EBUSY;
	}

	hwep->ep.desc = desc;

	hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	hwep->num  = usb_endpoint_num(desc);
	hwep->type = usb_endpoint_type(desc);

	hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
	hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));

	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
		cap |= QH_IOS;

	cap |= QH_ZLT;
	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
	/*
	 * For ISO-TX, we set mult at QH as the largest value, and use
	 * MultO at TD as real mult value.
	 */
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
		cap |= 3 << __ffs(QH_MULT);

	hwep->qh.ptr->cap = cpu_to_le32(cap);

	hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */

	if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
		retval = -EINVAL;
	}

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (hwep->num)
		retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
				       hwep->type);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL)
		return -EINVAL;
	else if (hwep->ep.desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(hwep->lock, flags);

	/* only internal SW should disable ctrl endpts */

	direction = hwep->dir;
	do {
		retval |= _ep_nuke(hwep);
		retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	hwep->ep.desc = NULL;

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/**
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci_hw_req *hwreq = NULL;

	if (ep == NULL)
		return NULL;

	hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
	if (hwreq != NULL) {
		INIT_LIST_HEAD(&hwreq->queue);
		INIT_LIST_HEAD(&hwreq->tds);
	}

	return (hwreq == NULL) ? NULL : &hwreq->req;
}

/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct td_node *node, *tmpnode;
	unsigned long flags;

	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "freeing queued request\n");
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del_init(&node->td);
		node->ptr = NULL;
		kfree(node);
	}

	kfree(hwreq);

	spin_unlock_irqrestore(hwep->lock, flags);
}

/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue() at "usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	retval = _ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	unsigned long flags;
	struct td_node *node, *tmpnode;

	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
		list_empty(&hwep->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del(&node->td);
		kfree(node);
	}

	/* pop request */
	list_del_init(&hwreq->queue);

	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);

	req->status = -ECONNRESET;

	if (hwreq->req.complete != NULL) {
		spin_unlock(hwep->lock);
		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
		spin_lock(hwep->lock);
	}

	spin_unlock_irqrestore(hwep->lock, flags);
	return 0;
}

/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	return _ep_set_halt(ep, value, true);
}

/**
 * ep_set_wedge: sets the halt feature and ignores clear requests
 *
 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
 */
static int ep_set_wedge(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	hwep->wedge = 1;
	spin_unlock_irqrestore(hwep->lock, flags);

	return usb_ep_set_halt(ep);
}

/**
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL) {
		dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	spin_unlock_irqrestore(hwep->lock, flags);
}

/**
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};

/******************************************************************************
 * GADGET block
 *****************************************************************************/
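/**
 * ci_udc_vbus_session: called when VBUS becomes active or inactive
 *
 * If a gadget driver is bound, this function starts the controller when
 * VBUS appears and stops all activity when it goes away
 */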
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int gadget_ready = 0;

	spin_lock_irqsave(&ci->lock, flags);
	ci->vbus_active = is_active;
	if (ci->driver)
		gadget_ready = 1;
	spin_unlock_irqrestore(&ci->lock, flags);

	if (gadget_ready) {
		if (is_active) {
			pm_runtime_get_sync(&_gadget->dev);
			hw_device_reset(ci);
			hw_device_state(ci, ci->ep0out->qh.dma);
			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
			usb_udc_vbus_handler(_gadget, true);
		} else {
			usb_udc_vbus_handler(_gadget, false);
			if (ci->driver)
				ci->driver->disconnect(&ci->gadget);
			hw_device_state(ci, 0);
			if (ci->platdata->notify_event)
				ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_STOPPED_EVENT);
			_gadget_stop_activity(&ci->gadget);
			pm_runtime_put_sync(&_gadget->dev);
			usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
		}
	}

	return 0;
}

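/**
 * ci_udc_wakeup: initiates remote wakeup
 *
 * This function forces a port resume, provided remote wakeup has been
 * enabled by the host and the port is currently suspended
 */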
static int ci_udc_wakeup(struct usb_gadget *_gadget)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ci->lock, flags);
	if (!ci->remote_wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		goto out;
	}
	hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(&ci->lock, flags);
	return ret;
}

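/**
 * ci_udc_vbus_draw: reports the maximum VBUS current to the transceiver
 *
 * This function returns an error code if no transceiver (usb_phy) is
 * available
 */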
static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	if (ci->usb_phy)
		return usb_phy_set_power(ci->usb_phy, ma);
	return -ENOTSUPP;
}

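/**
 * ci_udc_selfpowered: records whether the gadget is self-powered
 *
 * The stored value is reported back in GET_STATUS responses
 */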
static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	struct ci_hw_ep *hwep = ci->ep0in;
	unsigned long flags;

	spin_lock_irqsave(hwep->lock, flags);
	_gadget->is_selfpowered = (is_on != 0);
	spin_unlock_irqrestore(hwep->lock, flags);

	return 0;
}

/*
 * Change Data+ pullup status.
 * This function is used by usb_gadget_connect/disconnect.
 */
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	/* Data+ pullup controlled by OTG state machine in OTG fsm mode */
	if (ci_otg_is_fsm_mode(ci))
		return 0;

	pm_runtime_get_sync(&ci->gadget.dev);
	if (is_on)
		hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
	else
		hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
	pm_runtime_put_sync(&ci->gadget.dev);

	return 0;
}

static int ci_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver);
static int ci_udc_stop(struct usb_gadget *gadget);
/**
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci_udc_vbus_session,
	.wakeup		= ci_udc_wakeup,
	.set_selfpowered	= ci_udc_selfpowered,
	.pullup		= ci_udc_pullup,
	.vbus_draw	= ci_udc_vbus_draw,
	.udc_start	= ci_udc_start,
	.udc_stop	= ci_udc_stop,
};

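/**
 * init_eps: initialize the hardware endpoints
 * @ci: the controller
 *
 * This function sets up the ci_hw_ep array, allocates a queue head for
 * every endpoint, keeps the ep0in/ep0out shortcuts and adds all other
 * endpoints to the gadget's ep_list
 *
 * This function returns an error code
 */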
static int init_eps(struct ci_hdrc *ci)
{
	int retval = 0, i, j;

	for (i = 0; i < ci->hw_ep_max/2; i++)
		for (j = RX; j <= TX; j++) {
			int k = i + j * ci->hw_ep_max/2;
			struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];

			scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
					(j == TX)  ? "in" : "out");

			hwep->ci          = ci;
			hwep->lock         = &ci->lock;
			hwep->td_pool      = ci->td_pool;

			hwep->ep.name      = hwep->name;
			hwep->ep.ops       = &usb_ep_ops;
			/*
			 * for ep0: maxP defined in desc, for other
			 * eps, maxP is set by epautoconfig() called
			 * by gadget layer
			 */
			usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);

			INIT_LIST_HEAD(&hwep->qh.queue);
			hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
						     &hwep->qh.dma);
			if (hwep->qh.ptr == NULL)
				retval = -ENOMEM;
			else
				memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));

			/*
			 * set up shorthands for ep0 out and in endpoints,
			 * don't add to gadget's ep_list
			 */
			if (i == 0) {
				if (j == RX)
					ci->ep0out = hwep;
				else
					ci->ep0in = hwep;

				usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
				continue;
			}

			list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
		}

	return retval;
}

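/**
 * destroy_eps: free the per-endpoint resources allocated by init_eps
 * @ci: the controller
 */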
static void destroy_eps(struct ci_hdrc *ci)
{
	int i;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];

		if (hwep->pending_td)
			free_pending_td(hwep);
		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
	}
}

/**
 * ci_udc_start: register a gadget driver
 * @gadget: our gadget
 * @driver: the driver being registered
 *
 * Interrupts are enabled here.
 */
static int ci_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int retval = -ENOMEM;

	if (driver->disconnect == NULL)
		return -EINVAL;


	ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
	retval = usb_ep_enable(&ci->ep0out->ep);
	if (retval)
		return retval;

	ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
	retval = usb_ep_enable(&ci->ep0in->ep);
	if (retval)
		return retval;

	ci->driver = driver;

	/* Start otg fsm for B-device */
	if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
		ci_hdrc_otg_fsm_start(ci);
		return retval;
	}

	pm_runtime_get_sync(&ci->gadget.dev);
	if (ci->vbus_active) {
		spin_lock_irqsave(&ci->lock, flags);
		hw_device_reset(ci);
	} else {
		usb_udc_vbus_handler(&ci->gadget, false);
		pm_runtime_put_sync(&ci->gadget.dev);
		return retval;
	}

	retval = hw_device_state(ci, ci->ep0out->qh.dma);
	spin_unlock_irqrestore(&ci->lock, flags);
	if (retval)
		pm_runtime_put_sync(&ci->gadget.dev);

	return retval;
}

static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
{
	if (!ci_otg_is_fsm_mode(ci))
		return;

	mutex_lock(&ci->fsm.lock);
	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
		ci->fsm.a_bidl_adis_tmout = 1;
		ci_hdrc_otg_fsm_start(ci);
	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
		ci->fsm.protocol = PROTO_UNDEF;
		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
	}
	mutex_unlock(&ci->fsm.lock);
}

/**
 * ci_udc_stop: unregister a gadget driver
 */
static int ci_udc_stop(struct usb_gadget *gadget)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&ci->lock, flags);

	if (ci->vbus_active) {
		hw_device_state(ci, 0);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
			CI_HDRC_CONTROLLER_STOPPED_EVENT);
		spin_unlock_irqrestore(&ci->lock, flags);
		_gadget_stop_activity(&ci->gadget);
		spin_lock_irqsave(&ci->lock, flags);
		pm_runtime_put(&ci->gadget.dev);
	}

	ci->driver = NULL;
	spin_unlock_irqrestore(&ci->lock, flags);

	ci_udc_stop_for_otg_fsm(ci);
	return 0;
}

/******************************************************************************
 * BUS block
 *****************************************************************************/
/**
 * udc_irq: ci interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(struct ci_hdrc *ci)
{
	irqreturn_t retval;
	u32 intr;

	if (ci == NULL)
		return IRQ_HANDLED;

	spin_lock(&ci->lock);

	if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
		if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
				USBMODE_CM_DC) {
			spin_unlock(&ci->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active(ci);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)
			isr_reset_handler(ci);

		if (USBi_PCI & intr) {
			ci->gadget.speed = hw_port_is_high_speed(ci) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (ci->suspended && ci->driver->resume) {
				spin_unlock(&ci->lock);
				ci->driver->resume(&ci->gadget);
				spin_lock(&ci->lock);
				ci->suspended = 0;
			}
		}

		if (USBi_UI  & intr)
			isr_tr_complete_handler(ci);

		if (USBi_SLI & intr) {
			if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
			    ci->driver->suspend) {
				ci->suspended = 1;
				spin_unlock(&ci->lock);
				ci->driver->suspend(&ci->gadget);
				usb_gadget_set_state(&ci->gadget,
						USB_STATE_SUSPENDED);
				spin_lock(&ci->lock);
			}
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&ci->lock);

	return retval;
}

/**
 * udc_start: initialize gadget role
 * @ci: chipidea controller
 */
static int udc_start(struct ci_hdrc *ci)
{
	struct device *dev = ci->dev;
	int retval = 0;

	spin_lock_init(&ci->lock);

	ci->gadget.ops          = &usb_gadget_ops;
	ci->gadget.speed        = USB_SPEED_UNKNOWN;
	ci->gadget.max_speed    = USB_SPEED_HIGH;
	ci->gadget.is_otg       = ci->is_otg ? 1 : 0;
	ci->gadget.name         = ci->platdata->name;

	INIT_LIST_HEAD(&ci->gadget.ep_list);

	/* alloc resources */
	ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
				       sizeof(struct ci_hw_qh),
				       64, CI_HDRC_PAGE_SIZE);
	if (ci->qh_pool == NULL)
		return -ENOMEM;

	ci->td_pool = dma_pool_create("ci_hw_td", dev,
				       sizeof(struct ci_hw_td),
				       64, CI_HDRC_PAGE_SIZE);
	if (ci->td_pool == NULL) {
		retval = -ENOMEM;
		goto free_qh_pool;
	}

	retval = init_eps(ci);
	if (retval)
		goto free_pools;

	ci->gadget.ep0 = &ci->ep0in->ep;

	retval = usb_add_gadget_udc(dev, &ci->gadget);
	if (retval)
		goto destroy_eps;

	pm_runtime_no_callbacks(&ci->gadget.dev);
	pm_runtime_enable(&ci->gadget.dev);

	return retval;

destroy_eps:
	destroy_eps(ci);
free_pools:
	dma_pool_destroy(ci->td_pool);
free_qh_pool:
	dma_pool_destroy(ci->qh_pool);
	return retval;
}

/**
 * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{
	if (!ci->roles[CI_ROLE_GADGET])
		return;

	usb_del_gadget_udc(&ci->gadget);

	destroy_eps(ci);

	dma_pool_destroy(ci->td_pool);
	dma_pool_destroy(ci->qh_pool);
}

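/**
 * udc_id_switch_for_device: start hook of the gadget role
 *
 * In OTG mode this clears and enables the B-session valid (BSV)
 * interrupt, so VBUS changes are noticed while acting as a device
 */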
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
	if (ci->is_otg)
		/* Clear and enable BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
					OTGSC_BSVIS | OTGSC_BSVIE);

	return 0;
}

static void udc_id_switch_for_host(struct ci_hdrc *ci)
{
	/*
	 * The host doesn't care about the B_SESSION_VALID event,
	 * so clear and disable the BSV irq.
	 */
	if (ci->is_otg)
		hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
}

/**
 * ci_hdrc_gadget_init - initialize device related bits
 * @ci: the controller
 *
 * This function initializes the gadget, if the device is "device capable".
 */
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
	struct ci_role_driver *rdrv;

	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
		return -ENXIO;

	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= udc_id_switch_for_device;
	rdrv->stop	= udc_id_switch_for_host;
	rdrv->irq	= udc_irq;
	rdrv->name	= "gadget";
	ci->roles[CI_ROLE_GADGET] = rdrv;

	return udc_start(ci);
}