/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
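
/*
 * Illustrative use of xhci_handshake() (a sketch, mirroring the call made
 * in xhci_halt() below): wait up to XHCI_MAX_HALT_USEC microseconds for the
 * HCHalted bit to be set in the status register:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */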

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* Clear state flags, including dying, halted, or removing. */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may, in rare cases, result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			 "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, with the maximum number of interrupters taken from the
	 *   xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
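
	/*
	 * Worked example (numbers assumed, not taken from any particular
	 * host): on a 4-core system whose HCSPARAMS1 advertises 8
	 * interrupters, msix_count = min(4 + 1, 8) = 5 vectors.
	 */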

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev  *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver
 * that sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers the port by issuing a Warm
 * Reset if compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
		    compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}
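
/*
 * For illustration (port count assumed): with 4 USB3 ports,
 * (1 << 4) - 1 = 0b1111, so the check above succeeds once every port has
 * reported U0 at least once since port_status_u0 was cleared.
 */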


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize compliance mode recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		mutex_unlock(&xhci->mutex);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64	val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable wake bits on USB3 ports */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	/* disable wake bits on USB2 ports */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int			rc = 0;
	unsigned int		delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	u32			command;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
		      STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32			command, temp = 0, status;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	struct usb_hcd		*secondary_hcd;
	int			retval = 0;
	bool			comp_timer_running = false;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
			      STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * always needs to be re-initialized after a system resume: the ports
	 * may suffer from the compliance mode issue again, even if they had
	 * already entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
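
/*
 * Worked examples of the index formula above (endpoint numbers assumed):
 * ep 1 OUT (0x01) -> (1 * 2) + 0 - 1 = 1; ep 1 IN (0x81) -> (1 * 2) + 1 - 1 = 2;
 * the default control endpoint 0 -> (0 * 2) = 0.
 */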

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}
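
/*
 * For illustration, the round trip with the examples above: index 1 ->
 * DIV_ROUND_UP(1, 2) = 1 and an odd index, so USB_DIR_OUT | 1 = 0x01;
 * index 2 -> DIV_ROUND_UP(2, 2) = 1 and an even index, so USB_DIR_IN | 1 = 0x81.
 */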

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
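
/*
 * For illustration (endpoint assumed): ep 1 OUT has endpoint index 1, so its
 * add/drop flag is 1 << (1 + 1) = 0b100; this matches the ep 1 IN example
 * (added_ctxs = 0b1000) in the comment below.
 */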

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					  "virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
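
/*
 * For illustration (num_streams assumed): with num_streams == 4, stream IDs
 * 1, 2 and 3 map to stream_rings[1..3]; stream ID 0 and IDs >= 4 are rejected
 * by the checks above and make xhci_urb_to_transfer_ring() return NULL.
 */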

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt;
		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
		     i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				  __func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}
1797
1798	/*
1799	 * Configuration and alternate setting changes must be done in
1800	 * process context, not interrupt context (or so the documentation
1801	 * for usb_set_interface() and usb_set_configuration() claims).
1802	 */
1803	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1804		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1805				__func__, ep->desc.bEndpointAddress);
1806		return -ENOMEM;
1807	}
1808
1809	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1810	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1811
1812	/* If xhci_endpoint_disable() was called for this endpoint, but the
1813	 * xHC hasn't been notified yet through the check_bandwidth() call,
1814	 * this re-adds a new state for the endpoint from the new endpoint
1815	 * descriptors.  We must drop and re-add this endpoint, so we leave the
1816	 * drop flags alone.
1817	 */
1818	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1819
1820	/* Store the usb_device pointer for later use */
1821	ep->hcpriv = udev;
1822
1823	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1824			(unsigned int) ep->desc.bEndpointAddress,
1825			udev->slot_id,
1826			(unsigned int) new_drop_flags,
1827			(unsigned int) new_add_flags);
1828	return 0;
1829}
1830
1831static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1832{
1833	struct xhci_input_control_ctx *ctrl_ctx;
1834	struct xhci_ep_ctx *ep_ctx;
1835	struct xhci_slot_ctx *slot_ctx;
1836	int i;
1837
1838	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1839	if (!ctrl_ctx) {
1840		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1841				__func__);
1842		return;
1843	}
1844
1845	/* When an endpoint's add flag and drop flag are zero, any subsequent
1846	 * configure endpoint command will leave that endpoint's state
1847	 * untouched.  Make sure we don't leave any old state in the input
1848	 * endpoint contexts.
1849	 */
1850	ctrl_ctx->drop_flags = 0;
1851	ctrl_ctx->add_flags = 0;
1852	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1853	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1854	/* Endpoint 0 is always valid */
1855	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1856	for (i = 1; i < 31; ++i) {
1857		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1858		ep_ctx->ep_info = 0;
1859		ep_ctx->ep_info2 = 0;
1860		ep_ctx->deq = 0;
1861		ep_ctx->tx_info = 0;
1862	}
1863}
1864
1865static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1866		struct usb_device *udev, u32 *cmd_status)
1867{
1868	int ret;
1869
1870	switch (*cmd_status) {
1871	case COMP_CMD_ABORT:
1872	case COMP_CMD_STOP:
1873		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1874		ret = -ETIME;
1875		break;
1876	case COMP_ENOMEM:
1877		dev_warn(&udev->dev,
1878			 "Not enough host controller resources for new device state.\n");
1879		ret = -ENOMEM;
1880		/* FIXME: can we allocate more resources for the HC? */
1881		break;
1882	case COMP_BW_ERR:
1883	case COMP_2ND_BW_ERR:
1884		dev_warn(&udev->dev,
1885			 "Not enough bandwidth for new device state.\n");
1886		ret = -ENOSPC;
1887		/* FIXME: can we go back to the old state? */
1888		break;
1889	case COMP_TRB_ERR:
1890		/* the HCD set up something wrong */
1891		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1892				"add flag = 1, "
1893				"and endpoint is not disabled.\n");
1894		ret = -EINVAL;
1895		break;
1896	case COMP_DEV_ERR:
1897		dev_warn(&udev->dev,
1898			 "ERROR: Incompatible device for endpoint configure command.\n");
1899		ret = -ENODEV;
1900		break;
1901	case COMP_SUCCESS:
1902		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1903				"Successful Endpoint Configure command");
1904		ret = 0;
1905		break;
1906	default:
1907		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1908				*cmd_status);
1909		ret = -EINVAL;
1910		break;
1911	}
1912	return ret;
1913}
1914
1915static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1916		struct usb_device *udev, u32 *cmd_status)
1917{
1918	int ret;
1919	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1920
1921	switch (*cmd_status) {
1922	case COMP_CMD_ABORT:
1923	case COMP_CMD_STOP:
1924		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1925		ret = -ETIME;
1926		break;
1927	case COMP_EINVAL:
1928		dev_warn(&udev->dev,
1929			 "WARN: xHCI driver setup invalid evaluate context command.\n");
1930		ret = -EINVAL;
1931		break;
1932	case COMP_EBADSLT:
1933		dev_warn(&udev->dev,
1934			"WARN: slot not enabled for evaluate context command.\n");
1935		ret = -EINVAL;
1936		break;
1937	case COMP_CTX_STATE:
1938		dev_warn(&udev->dev,
1939			"WARN: invalid context state for evaluate context command.\n");
1940		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1941		ret = -EINVAL;
1942		break;
1943	case COMP_DEV_ERR:
1944		dev_warn(&udev->dev,
1945			"ERROR: Incompatible device for evaluate context command.\n");
1946		ret = -ENODEV;
1947		break;
1948	case COMP_MEL_ERR:
1949		/* Max Exit Latency too large error */
1950		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1951		ret = -EINVAL;
1952		break;
1953	case COMP_SUCCESS:
1954		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1955				"Successful evaluate context command");
1956		ret = 0;
1957		break;
1958	default:
1959		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1960			*cmd_status);
1961		ret = -EINVAL;
1962		break;
1963	}
1964	return ret;
1965}
1966
1967static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1968		struct xhci_input_control_ctx *ctrl_ctx)
1969{
1970	u32 valid_add_flags;
1971	u32 valid_drop_flags;
1972
1973	/* Ignore the slot flag (bit 0), and the default control endpoint flag
1974	 * (bit 1).  The default control endpoint is added during the Address
1975	 * Device command and is never removed until the slot is disabled.
1976	 */
1977	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1978	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1979
1980	/* Use hweight32 to count the number of ones in the add flags, or
1981	 * number of endpoints added.  Don't count endpoints that are changed
1982	 * (both added and dropped).
1983	 */
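	/*
	 * For example (hypothetical flag values): add_flags = 0x3c and
	 * drop_flags = 0x14 give valid_add_flags = 0xf and valid_drop_flags
	 * = 0x5, so hweight32(0xf) - hweight32(0x5) = 4 - 2 = 2 endpoints
	 * were newly added.
	 */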
1984	return hweight32(valid_add_flags) -
1985		hweight32(valid_add_flags & valid_drop_flags);
1986}
1987
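/* Count the endpoints that are dropped and not re-added; endpoints that are
 * merely changed (both dropped and added) are not counted.
 */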
1988static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1989		struct xhci_input_control_ctx *ctrl_ctx)
1990{
1991	u32 valid_add_flags;
1992	u32 valid_drop_flags;
1993
1994	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1995	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1996
1997	return hweight32(valid_drop_flags) -
1998		hweight32(valid_add_flags & valid_drop_flags);
1999}
2000
2001/*
2002 * We need to reserve the new number of endpoints before the configure endpoint
2003 * command completes.  We can't subtract the dropped endpoints from the number
2004 * of active endpoints until the command completes because we can oversubscribe
2005 * the host in this case:
2006 *
2007 *  - the first configure endpoint command drops more endpoints than it adds
2008 *  - a second configure endpoint command that adds more endpoints is queued
2009 *  - the first configure endpoint command fails, so the config is unchanged
2010 *  - the second command may succeed, even though there aren't enough resources
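 *
 *  For example (hypothetical numbers): with a limit of 8 endpoint contexts and
 *  all 8 in use, a first command that drops 4 and a second that adds 4 would
 *  both appear to fit if the drops were subtracted early; if the first command
 *  then failed, 12 contexts would be committed against a limit of 8.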
2011 *
2012 * Must be called with xhci->lock held.
2013 */
2014static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2015		struct xhci_input_control_ctx *ctrl_ctx)
2016{
2017	u32 added_eps;
2018
2019	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2020	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2021		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2022				"Not enough ep ctxs: "
2023				"%u active, need to add %u, limit is %u.",
2024				xhci->num_active_eps, added_eps,
2025				xhci->limit_active_eps);
2026		return -ENOMEM;
2027	}
2028	xhci->num_active_eps += added_eps;
2029	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2030			"Adding %u ep ctxs, %u now active.", added_eps,
2031			xhci->num_active_eps);
2032	return 0;
2033}
2034
2035/*
2036 * The xHC failed the configure endpoint command for some other reason, so we
2037 * need to revert the resources that the failed configuration would have used.
2038 *
2039 * Must be called with xhci->lock held.
2040 */
2041static void xhci_free_host_resources(struct xhci_hcd *xhci,
2042		struct xhci_input_control_ctx *ctrl_ctx)
2043{
2044	u32 num_failed_eps;
2045
2046	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2047	xhci->num_active_eps -= num_failed_eps;
2048	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2049			"Removing %u failed ep ctxs, %u now active.",
2050			num_failed_eps,
2051			xhci->num_active_eps);
2052}
2053
2054/*
2055 * Now that the command has completed, clean up the active endpoint count by
2056 * subtracting out the endpoints that were dropped (but not changed).
2057 *
2058 * Must be called with xhci->lock held.
2059 */
2060static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2061		struct xhci_input_control_ctx *ctrl_ctx)
2062{
2063	u32 num_dropped_eps;
2064
2065	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2066	xhci->num_active_eps -= num_dropped_eps;
2067	if (num_dropped_eps)
2068		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2069				"Removing %u dropped ep ctxs, %u now active.",
2070				num_dropped_eps,
2071				xhci->num_active_eps);
2072}
2073
2074static unsigned int xhci_get_block_size(struct usb_device *udev)
2075{
2076	switch (udev->speed) {
2077	case USB_SPEED_LOW:
2078	case USB_SPEED_FULL:
2079		return FS_BLOCK;
2080	case USB_SPEED_HIGH:
2081		return HS_BLOCK;
2082	case USB_SPEED_SUPER:
2083		return SS_BLOCK;
2084	case USB_SPEED_UNKNOWN:
2085	case USB_SPEED_WIRELESS:
2086	default:
2087		/* Should never happen */
2088		return 1;
2089	}
2090}
2091
2092static unsigned int
2093xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2094{
2095	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2096		return LS_OVERHEAD;
2097	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2098		return FS_OVERHEAD;
2099	return HS_OVERHEAD;
2100}
2101
2102/* If we are changing a LS/FS device under a HS hub,
2103 * make sure (if we are activating a new TT) that the HS bus has enough
2104 * bandwidth for this new TT.
2105 */
2106static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2107		struct xhci_virt_device *virt_dev,
2108		int old_active_eps)
2109{
2110	struct xhci_interval_bw_table *bw_table;
2111	struct xhci_tt_bw_info *tt_info;
2112
2113	/* Find the bandwidth table for the root port this TT is attached to. */
2114	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2115	tt_info = virt_dev->tt_info;
2116	/* If this TT already had active endpoints, the bandwidth for this TT
2117	 * has already been added.  Removing all periodic endpoints (and thus
2118	 * making the TT inactive) will only decrease the bandwidth used.
2119	 */
2120	if (old_active_eps)
2121		return 0;
2122	if (old_active_eps == 0 && tt_info->active_eps != 0) {
2123		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2124			return -ENOMEM;
2125		return 0;
2126	}
2127	/* Not sure why we would have no new active endpoints...
2128	 *
2129	 * Maybe because of an Evaluate Context change for a hub update or a
2130	 * control endpoint 0 max packet size change?
2131	 * FIXME: skip the bandwidth calculation in that case.
2132	 */
2133	return 0;
2134}
2135
2136static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2137		struct xhci_virt_device *virt_dev)
2138{
2139	unsigned int bw_reserved;
2140
2141	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2142	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2143		return -ENOMEM;
2144
2145	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2146	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2147		return -ENOMEM;
2148
2149	return 0;
2150}
2151
2152/*
2153 * This algorithm is a very conservative estimate of the worst-case scheduling
2154 * scenario for any one interval.  The hardware dynamically schedules the
2155 * packets, so we can't tell which microframe could be the limiting factor in
2156 * the bandwidth scheduling.  This only takes into account periodic endpoints.
2157 *
2158 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2159 * case scenario.  Instead, we come up with an estimate that is no less than
2160 * the worst case bandwidth used for any one microframe, but may be an
2161 * over-estimate.
2162 *
2163 * We walk the requirements for each endpoint by interval, starting with the
2164 * smallest interval, and place packets in the schedule where there is only one
2165 * possible way to schedule packets for that interval.  In order to simplify
2166 * this algorithm, we record the largest max packet size for each interval, and
2167 * assume all packets will be that size.
2168 *
2169 * For interval 0, we obviously must schedule all packets in every microframe.
2170 * The bandwidth for interval 0 is just the amount of data to be transmitted
2171 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2172 * the number of packets).
2173 *
2174 * For interval 1, we have two possible microframes to schedule those packets
2175 * in.  For this algorithm, if we can schedule the same number of packets for
2176 * each possible scheduling opportunity (each microframe), we will do so.  The
2177 * remaining number of packets will be saved to be transmitted in the gaps in
2178 * the next interval's scheduling sequence.
2179 *
2180 * As we move those remaining packets to be scheduled with interval 2 packets,
2181 * we have to double the number of remaining packets to transmit.  This is
2182 * because the intervals are actually powers of 2, and we would be transmitting
2183 * the previous interval's packets twice in this interval.  We also have to be
2184 * sure that when we look at the largest max packet size for this interval, we
2185 * also look at the largest max packet size for the remaining packets and take
2186 * the greater of the two.
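 *
 * For instance (hypothetical counts), 3 packets carried over from the previous
 * interval plus 2 new packets at this interval give 2 * 3 + 2 = 8 packets to
 * distribute across this interval's scheduling opportunities.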
2187 *
2188 * The algorithm continues to evenly distribute packets in each scheduling
2189 * opportunity, and push the remaining packets out, until we get to the last
2190 * interval.  Then those packets and their associated overhead are just added
2191 * to the bandwidth used.
2192 */
2193static int xhci_check_bw_table(struct xhci_hcd *xhci,
2194		struct xhci_virt_device *virt_dev,
2195		int old_active_eps)
2196{
2197	unsigned int bw_reserved;
2198	unsigned int max_bandwidth;
2199	unsigned int bw_used;
2200	unsigned int block_size;
2201	struct xhci_interval_bw_table *bw_table;
2202	unsigned int packet_size = 0;
2203	unsigned int overhead = 0;
2204	unsigned int packets_transmitted = 0;
2205	unsigned int packets_remaining = 0;
2206	unsigned int i;
2207
2208	if (virt_dev->udev->speed == USB_SPEED_SUPER)
2209		return xhci_check_ss_bw(xhci, virt_dev);
2210
2211	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2212		max_bandwidth = HS_BW_LIMIT;
2213		/* Convert percent of bus BW reserved to blocks reserved */
2214		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2215	} else {
2216		max_bandwidth = FS_BW_LIMIT;
2217		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2218	}
2219
2220	bw_table = virt_dev->bw_table;
2221	/* We need to translate the max packet size and max ESIT payloads into
2222	 * the units the hardware uses.
2223	 */
2224	block_size = xhci_get_block_size(virt_dev->udev);
2225
2226	/* If we are manipulating a LS/FS device under a HS hub, double check
2227	 * that the HS bus has enough bandwidth if we are activating a new TT.
2228	 */
2229	if (virt_dev->tt_info) {
2230		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2231				"Recalculating BW for rootport %u",
2232				virt_dev->real_port);
2233		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2234			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2235					"newly activated TT.\n");
2236			return -ENOMEM;
2237		}
2238		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2239				"Recalculating BW for TT slot %u port %u",
2240				virt_dev->tt_info->slot_id,
2241				virt_dev->tt_info->ttport);
2242	} else {
2243		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2244				"Recalculating BW for rootport %u",
2245				virt_dev->real_port);
2246	}
2247
2248	/* Add in how much bandwidth will be used for interval zero, or the
2249	 * rounded max ESIT payload + number of packets * largest overhead.
2250	 */
2251	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2252		bw_table->interval_bw[0].num_packets *
2253		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2254
2255	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2256		unsigned int bw_added;
2257		unsigned int largest_mps;
2258		unsigned int interval_overhead;
2259
2260		/*
2261		 * How many packets could we transmit in this interval?
2262		 * If packets didn't fit in the previous interval, we will need
2263		 * to transmit that many packets twice within this interval.
2264		 */
2265		packets_remaining = 2 * packets_remaining +
2266			bw_table->interval_bw[i].num_packets;
2267
2268		/* Find the largest max packet size of this or the previous
2269		 * interval.
2270		 */
2271		if (list_empty(&bw_table->interval_bw[i].endpoints))
2272			largest_mps = 0;
2273		else {
2274			struct xhci_virt_ep *virt_ep;
2275			struct list_head *ep_entry;
2276
2277			ep_entry = bw_table->interval_bw[i].endpoints.next;
2278			virt_ep = list_entry(ep_entry,
2279					struct xhci_virt_ep, bw_endpoint_list);
2280			/* Convert to blocks, rounding up */
2281			largest_mps = DIV_ROUND_UP(
2282					virt_ep->bw_info.max_packet_size,
2283					block_size);
2284		}
2285		if (largest_mps > packet_size)
2286			packet_size = largest_mps;
2287
2288		/* Use the larger overhead of this or the previous interval. */
2289		interval_overhead = xhci_get_largest_overhead(
2290				&bw_table->interval_bw[i]);
2291		if (interval_overhead > overhead)
2292			overhead = interval_overhead;
2293
2294		/* How many packets can we evenly distribute across
2295		 * (1 << (i + 1)) possible scheduling opportunities?
2296		 */
2297		packets_transmitted = packets_remaining >> (i + 1);
2298
2299		/* Add in the bandwidth used for those scheduled packets */
2300		bw_added = packets_transmitted * (overhead + packet_size);
2301
2302		/* How many packets do we have remaining to transmit? */
2303		packets_remaining = packets_remaining % (1 << (i + 1));
2304
2305		/* What largest max packet size should those packets have? */
2306		/* If we've transmitted all packets, don't carry over the
2307		 * largest packet size.
2308		 */
2309		if (packets_remaining == 0) {
2310			packet_size = 0;
2311			overhead = 0;
2312		} else if (packets_transmitted > 0) {
2313			/* Otherwise if we do have remaining packets, and we've
2314			 * scheduled some packets in this interval, take the
2315			 * largest max packet size from endpoints with this
2316			 * interval.
2317			 */
2318			packet_size = largest_mps;
2319			overhead = interval_overhead;
2320		}
2321		/* Otherwise carry over packet_size and overhead from the last
2322		 * time we had a remainder.
2323		 */
2324		bw_used += bw_added;
2325		if (bw_used > max_bandwidth) {
2326			xhci_warn(xhci, "Not enough bandwidth. "
2327					"Proposed: %u, Max: %u\n",
2328				bw_used, max_bandwidth);
2329			return -ENOMEM;
2330		}
2331	}
2332	/*
2333	 * Ok, we know we have some packets left over after even-handedly
2334	 * scheduling interval 15.  We don't know which microframes they will
2335	 * fit into, so we over-schedule and say they will be scheduled every
2336	 * microframe.
2337	 */
2338	if (packets_remaining > 0)
2339		bw_used += overhead + packet_size;
2340
2341	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2342		unsigned int port_index = virt_dev->real_port - 1;
2343
2344		/* OK, we're manipulating a HS device attached to a
2345		 * root port bandwidth domain.  Include the number of active TTs
2346		 * in the bandwidth used.
2347		 */
2348		bw_used += TT_HS_OVERHEAD *
2349			xhci->rh_bw[port_index].num_active_tts;
2350	}
2351
2352	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2353		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
2354		"Available: %u percent",
2355		bw_used, max_bandwidth, bw_reserved,
2356		(max_bandwidth - bw_used - bw_reserved) * 100 /
2357		max_bandwidth);
2358
2359	bw_used += bw_reserved;
2360	if (bw_used > max_bandwidth) {
2361		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2362				bw_used, max_bandwidth);
2363		return -ENOMEM;
2364	}
2365
2366	bw_table->bw_used = bw_used;
2367	return 0;
2368}
2369
2370static bool xhci_is_async_ep(unsigned int ep_type)
2371{
2372	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2373					ep_type != ISOC_IN_EP &&
2374					ep_type != INT_IN_EP);
2375}
2376
2377static bool xhci_is_sync_in_ep(unsigned int ep_type)
2378{
2379	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2380}
2381
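/* Rough estimate of the SuperSpeed bandwidth blocks an endpoint consumes:
 * data payload plus per-packet and burst overhead, averaged over
 * 2^ep_interval for endpoints with a non-zero interval.
 */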
2382static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2383{
2384	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2385
2386	if (ep_bw->ep_interval == 0)
2387		return SS_OVERHEAD_BURST +
2388			(ep_bw->mult * ep_bw->num_packets *
2389					(SS_OVERHEAD + mps));
2390	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2391				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2392				1 << ep_bw->ep_interval);
2393
2394}
2395
2396void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2397		struct xhci_bw_info *ep_bw,
2398		struct xhci_interval_bw_table *bw_table,
2399		struct usb_device *udev,
2400		struct xhci_virt_ep *virt_ep,
2401		struct xhci_tt_bw_info *tt_info)
2402{
2403	struct xhci_interval_bw	*interval_bw;
2404	int normalized_interval;
2405
2406	if (xhci_is_async_ep(ep_bw->type))
2407		return;
2408
2409	if (udev->speed == USB_SPEED_SUPER) {
2410		if (xhci_is_sync_in_ep(ep_bw->type))
2411			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2412				xhci_get_ss_bw_consumed(ep_bw);
2413		else
2414			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2415				xhci_get_ss_bw_consumed(ep_bw);
2416		return;
2417	}
2418
2419	/* SuperSpeed endpoints never get added to intervals in the table, so
2420	 * this check is only valid for HS/FS/LS devices.
2421	 */
2422	if (list_empty(&virt_ep->bw_endpoint_list))
2423		return;
2424	/* For LS/FS devices, we need to translate the interval expressed in
2425	 * microframes to frames.
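	 * For example, a full-speed ep_interval of 3 (2^3 * 125us = 1 frame)
	 * maps to interval table index 0.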
2426	 */
2427	if (udev->speed == USB_SPEED_HIGH)
2428		normalized_interval = ep_bw->ep_interval;
2429	else
2430		normalized_interval = ep_bw->ep_interval - 3;
2431
2432	if (normalized_interval == 0)
2433		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2434	interval_bw = &bw_table->interval_bw[normalized_interval];
2435	interval_bw->num_packets -= ep_bw->num_packets;
2436	switch (udev->speed) {
2437	case USB_SPEED_LOW:
2438		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2439		break;
2440	case USB_SPEED_FULL:
2441		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2442		break;
2443	case USB_SPEED_HIGH:
2444		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2445		break;
2446	case USB_SPEED_SUPER:
2447	case USB_SPEED_UNKNOWN:
2448	case USB_SPEED_WIRELESS:
2449		/* Should never happen because only LS/FS/HS endpoints will get
2450		 * added to the endpoint list.
2451		 */
2452		return;
2453	}
2454	if (tt_info)
2455		tt_info->active_eps -= 1;
2456	list_del_init(&virt_ep->bw_endpoint_list);
2457}
2458
2459static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2460		struct xhci_bw_info *ep_bw,
2461		struct xhci_interval_bw_table *bw_table,
2462		struct usb_device *udev,
2463		struct xhci_virt_ep *virt_ep,
2464		struct xhci_tt_bw_info *tt_info)
2465{
2466	struct xhci_interval_bw	*interval_bw;
2467	struct xhci_virt_ep *smaller_ep;
2468	int normalized_interval;
2469
2470	if (xhci_is_async_ep(ep_bw->type))
2471		return;
2472
2473	if (udev->speed == USB_SPEED_SUPER) {
2474		if (xhci_is_sync_in_ep(ep_bw->type))
2475			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2476				xhci_get_ss_bw_consumed(ep_bw);
2477		else
2478			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2479				xhci_get_ss_bw_consumed(ep_bw);
2480		return;
2481	}
2482
2483	/* For LS/FS devices, we need to translate the interval expressed in
2484	 * microframes to frames.
2485	 */
2486	if (udev->speed == USB_SPEED_HIGH)
2487		normalized_interval = ep_bw->ep_interval;
2488	else
2489		normalized_interval = ep_bw->ep_interval - 3;
2490
2491	if (normalized_interval == 0)
2492		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2493	interval_bw = &bw_table->interval_bw[normalized_interval];
2494	interval_bw->num_packets += ep_bw->num_packets;
2495	switch (udev->speed) {
2496	case USB_SPEED_LOW:
2497		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2498		break;
2499	case USB_SPEED_FULL:
2500		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2501		break;
2502	case USB_SPEED_HIGH:
2503		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2504		break;
2505	case USB_SPEED_SUPER:
2506	case USB_SPEED_UNKNOWN:
2507	case USB_SPEED_WIRELESS:
2508		/* Should never happen because only LS/FS/HS endpoints will get
2509		 * added to the endpoint list.
2510		 */
2511		return;
2512	}
2513
2514	if (tt_info)
2515		tt_info->active_eps += 1;
2516	/* Insert the endpoint into the list, largest max packet size first. */
2517	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2518			bw_endpoint_list) {
2519		if (ep_bw->max_packet_size >=
2520				smaller_ep->bw_info.max_packet_size) {
2521			/* Add the new ep before the smaller endpoint */
2522			list_add_tail(&virt_ep->bw_endpoint_list,
2523					&smaller_ep->bw_endpoint_list);
2524			return;
2525		}
2526	}
2527	/* Add the new endpoint at the end of the list. */
2528	list_add_tail(&virt_ep->bw_endpoint_list,
2529			&interval_bw->endpoints);
2530}
2531
2532void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2533		struct xhci_virt_device *virt_dev,
2534		int old_active_eps)
2535{
2536	struct xhci_root_port_bw_info *rh_bw_info;
2537	if (!virt_dev->tt_info)
2538		return;
2539
2540	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2541	if (old_active_eps == 0 &&
2542				virt_dev->tt_info->active_eps != 0) {
2543		rh_bw_info->num_active_tts += 1;
2544		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2545	} else if (old_active_eps != 0 &&
2546				virt_dev->tt_info->active_eps == 0) {
2547		rh_bw_info->num_active_tts -= 1;
2548		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2549	}
2550}
2551
2552static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2553		struct xhci_virt_device *virt_dev,
2554		struct xhci_container_ctx *in_ctx)
2555{
2556	struct xhci_bw_info ep_bw_info[31];
2557	int i;
2558	struct xhci_input_control_ctx *ctrl_ctx;
2559	int old_active_eps = 0;
2560
2561	if (virt_dev->tt_info)
2562		old_active_eps = virt_dev->tt_info->active_eps;
2563
2564	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2565	if (!ctrl_ctx) {
2566		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2567				__func__);
2568		return -ENOMEM;
2569	}
2570
2571	for (i = 0; i < 31; i++) {
2572		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2573			continue;
2574
2575		/* Make a copy of the BW info in case we need to revert this */
2576		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2577				sizeof(ep_bw_info[i]));
2578		/* Drop the endpoint from the interval table if the endpoint is
2579		 * being dropped or changed.
2580		 */
2581		if (EP_IS_DROPPED(ctrl_ctx, i))
2582			xhci_drop_ep_from_interval_table(xhci,
2583					&virt_dev->eps[i].bw_info,
2584					virt_dev->bw_table,
2585					virt_dev->udev,
2586					&virt_dev->eps[i],
2587					virt_dev->tt_info);
2588	}
2589	/* Overwrite the information stored in the endpoints' bw_info */
2590	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2591	for (i = 0; i < 31; i++) {
2592		/* Add any changed or added endpoints to the interval table */
2593		if (EP_IS_ADDED(ctrl_ctx, i))
2594			xhci_add_ep_to_interval_table(xhci,
2595					&virt_dev->eps[i].bw_info,
2596					virt_dev->bw_table,
2597					virt_dev->udev,
2598					&virt_dev->eps[i],
2599					virt_dev->tt_info);
2600	}
2601
2602	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2603		/* Ok, this fits in the bandwidth we have.
2604		 * Update the number of active TTs.
2605		 */
2606		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2607		return 0;
2608	}
2609
2610	/* We don't have enough bandwidth for this, revert the stored info. */
2611	for (i = 0; i < 31; i++) {
2612		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2613			continue;
2614
2615		/* Drop the new copies of any added or changed endpoints from
2616		 * the interval table.
2617		 */
2618		if (EP_IS_ADDED(ctrl_ctx, i)) {
2619			xhci_drop_ep_from_interval_table(xhci,
2620					&virt_dev->eps[i].bw_info,
2621					virt_dev->bw_table,
2622					virt_dev->udev,
2623					&virt_dev->eps[i],
2624					virt_dev->tt_info);
2625		}
2626		/* Revert the endpoint back to its old information */
2627		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2628				sizeof(ep_bw_info[i]));
2629		/* Add any changed or dropped endpoints back into the table */
2630		if (EP_IS_DROPPED(ctrl_ctx, i))
2631			xhci_add_ep_to_interval_table(xhci,
2632					&virt_dev->eps[i].bw_info,
2633					virt_dev->bw_table,
2634					virt_dev->udev,
2635					&virt_dev->eps[i],
2636					virt_dev->tt_info);
2637	}
2638	return -ENOMEM;
2639}
2640
2641
2642/* Issue a configure endpoint command or evaluate context command
2643 * and wait for it to finish.
2644 */
2645static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2646		struct usb_device *udev,
2647		struct xhci_command *command,
2648		bool ctx_change, bool must_succeed)
2649{
2650	int ret;
2651	unsigned long flags;
2652	struct xhci_input_control_ctx *ctrl_ctx;
2653	struct xhci_virt_device *virt_dev;
2654
2655	if (!command)
2656		return -EINVAL;
2657
2658	spin_lock_irqsave(&xhci->lock, flags);
2659	virt_dev = xhci->devs[udev->slot_id];
2660
2661	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2662	if (!ctrl_ctx) {
2663		spin_unlock_irqrestore(&xhci->lock, flags);
2664		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2665				__func__);
2666		return -ENOMEM;
2667	}
2668
2669	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2670			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2671		spin_unlock_irqrestore(&xhci->lock, flags);
2672		xhci_warn(xhci, "Not enough host resources, "
2673				"active endpoint contexts = %u\n",
2674				xhci->num_active_eps);
2675		return -ENOMEM;
2676	}
2677	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2678	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2679		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2680			xhci_free_host_resources(xhci, ctrl_ctx);
2681		spin_unlock_irqrestore(&xhci->lock, flags);
2682		xhci_warn(xhci, "Not enough bandwidth\n");
2683		return -ENOMEM;
2684	}
2685
2686	if (!ctx_change)
2687		ret = xhci_queue_configure_endpoint(xhci, command,
2688				command->in_ctx->dma,
2689				udev->slot_id, must_succeed);
2690	else
2691		ret = xhci_queue_evaluate_context(xhci, command,
2692				command->in_ctx->dma,
2693				udev->slot_id, must_succeed);
2694	if (ret < 0) {
2695		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2696			xhci_free_host_resources(xhci, ctrl_ctx);
2697		spin_unlock_irqrestore(&xhci->lock, flags);
2698		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
2699				"FIXME allocate a new ring segment");
2700		return -ENOMEM;
2701	}
2702	xhci_ring_cmd_db(xhci);
2703	spin_unlock_irqrestore(&xhci->lock, flags);
2704
2705	/* Wait for the configure endpoint command to complete */
2706	wait_for_completion(command->completion);
2707
2708	if (!ctx_change)
2709		ret = xhci_configure_endpoint_result(xhci, udev,
2710						     &command->status);
2711	else
2712		ret = xhci_evaluate_context_result(xhci, udev,
2713						   &command->status);
2714
2715	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2716		spin_lock_irqsave(&xhci->lock, flags);
2717		/* If the command failed, remove the reserved resources.
2718		 * Otherwise, clean up the estimate to include dropped eps.
2719		 */
2720		if (ret)
2721			xhci_free_host_resources(xhci, ctrl_ctx);
2722		else
2723			xhci_finish_resource_reservation(xhci, ctrl_ctx);
2724		spin_unlock_irqrestore(&xhci->lock, flags);
2725	}
2726	return ret;
2727}
2728
2729static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2730	struct xhci_virt_device *vdev, int i)
2731{
2732	struct xhci_virt_ep *ep = &vdev->eps[i];
2733
2734	if (ep->ep_state & EP_HAS_STREAMS) {
2735		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2736				xhci_get_endpoint_address(i));
2737		xhci_free_stream_info(xhci, ep->stream_info);
2738		ep->stream_info = NULL;
2739		ep->ep_state &= ~EP_HAS_STREAMS;
2740	}
2741}
2742
2743/* Called after one or more calls to xhci_add_endpoint() or
2744 * xhci_drop_endpoint().  If this call fails, the USB core is expected
2745 * to call xhci_reset_bandwidth().
2746 *
2747 * Since we are in the middle of changing either configuration or
2748 * installing a new alt setting, the USB core won't allow URBs to be
2749 * enqueued for any endpoint on the old config or interface.  Nothing
2750 * else should be touching the xhci->devs[slot_id] structure, so we
2751 * don't need to take the xhci->lock for manipulating that.
2752 */
2753int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2754{
2755	int i;
2756	int ret = 0;
2757	struct xhci_hcd *xhci;
2758	struct xhci_virt_device	*virt_dev;
2759	struct xhci_input_control_ctx *ctrl_ctx;
2760	struct xhci_slot_ctx *slot_ctx;
2761	struct xhci_command *command;
2762
2763	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2764	if (ret <= 0)
2765		return ret;
2766	xhci = hcd_to_xhci(hcd);
2767	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2768		(xhci->xhc_state & XHCI_STATE_REMOVING))
2769		return -ENODEV;
2770
2771	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2772	virt_dev = xhci->devs[udev->slot_id];
2773
2774	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2775	if (!command)
2776		return -ENOMEM;
2777
2778	command->in_ctx = virt_dev->in_ctx;
2779
2780	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2781	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2782	if (!ctrl_ctx) {
2783		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2784				__func__);
2785		ret = -ENOMEM;
2786		goto command_cleanup;
2787	}
2788	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2789	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2790	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2791
2792	/* Don't issue the command if there are no endpoints to update. */
2793	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2794	    ctrl_ctx->drop_flags == 0) {
2795		ret = 0;
2796		goto command_cleanup;
2797	}
2798	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2799	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2800	for (i = 31; i >= 1; i--) {
2801		__le32 le32 = cpu_to_le32(BIT(i));
2802
2803		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2804		    || (ctrl_ctx->add_flags & le32) || i == 1) {
2805			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2806			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2807			break;
2808		}
2809	}
2810	xhci_dbg(xhci, "New Input Control Context:\n");
2811	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2812		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2813
2814	ret = xhci_configure_endpoint(xhci, udev, command,
2815			false, false);
2816	if (ret)
2817		/* Callee should call reset_bandwidth() */
2818		goto command_cleanup;
2819
2820	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2821	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2822		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2823
2824	/* Free any rings that were dropped, but not changed. */
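	/* Endpoint index i corresponds to context flag bit (i + 1), since bit 0
	 * is the slot context flag and bit 1 is endpoint 0.
	 */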
2825	for (i = 1; i < 31; ++i) {
2826		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2827		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2828			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2829			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2830		}
2831	}
2832	xhci_zero_in_ctx(xhci, virt_dev);
2833	/*
2834	 * Install any rings for completely new endpoints or changed endpoints,
2835	 * and free or cache any old rings from changed endpoints.
2836	 */
2837	for (i = 1; i < 31; ++i) {
2838		if (!virt_dev->eps[i].new_ring)
2839			continue;
2840		/* Only cache or free the old ring if it exists.
2841		 * It may not if this is the first add of an endpoint.
2842		 */
2843		if (virt_dev->eps[i].ring) {
2844			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2845		}
2846		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2847		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2848		virt_dev->eps[i].new_ring = NULL;
2849	}
2850command_cleanup:
2851	kfree(command->completion);
2852	kfree(command);
2853
2854	return ret;
2855}
2856
2857void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2858{
2859	struct xhci_hcd *xhci;
2860	struct xhci_virt_device	*virt_dev;
2861	int i, ret;
2862
2863	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2864	if (ret <= 0)
2865		return;
2866	xhci = hcd_to_xhci(hcd);
2867
2868	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2869	virt_dev = xhci->devs[udev->slot_id];
2870	/* Free any rings allocated for added endpoints */
2871	for (i = 0; i < 31; ++i) {
2872		if (virt_dev->eps[i].new_ring) {
2873			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2874			virt_dev->eps[i].new_ring = NULL;
2875		}
2876	}
2877	xhci_zero_in_ctx(xhci, virt_dev);
2878}
2879
2880static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2881		struct xhci_container_ctx *in_ctx,
2882		struct xhci_container_ctx *out_ctx,
2883		struct xhci_input_control_ctx *ctrl_ctx,
2884		u32 add_flags, u32 drop_flags)
2885{
2886	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2887	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2888	xhci_slot_copy(xhci, in_ctx, out_ctx);
2889	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2890
2891	xhci_dbg(xhci, "Input Context:\n");
2892	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2893}
2894
2895static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2896		unsigned int slot_id, unsigned int ep_index,
2897		struct xhci_dequeue_state *deq_state)
2898{
2899	struct xhci_input_control_ctx *ctrl_ctx;
2900	struct xhci_container_ctx *in_ctx;
2901	struct xhci_ep_ctx *ep_ctx;
2902	u32 added_ctxs;
2903	dma_addr_t addr;
2904
2905	in_ctx = xhci->devs[slot_id]->in_ctx;
2906	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2907	if (!ctrl_ctx) {
2908		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2909				__func__);
2910		return;
2911	}
2912
2913	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2914			xhci->devs[slot_id]->out_ctx, ep_index);
2915	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2916	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2917			deq_state->new_deq_ptr);
2918	if (addr == 0) {
2919		xhci_warn(xhci, "WARN Cannot submit config ep after "
2920				"reset ep command\n");
2921		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2922				deq_state->new_deq_seg,
2923				deq_state->new_deq_ptr);
2924		return;
2925	}
2926	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2927
2928	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2929	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2930			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2931			added_ctxs, added_ctxs);
2932}
2933
2934void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2935			unsigned int ep_index, struct xhci_td *td)
2936{
2937	struct xhci_dequeue_state deq_state;
2938	struct xhci_virt_ep *ep;
2939	struct usb_device *udev = td->urb->dev;
2940
2941	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2942			"Cleaning up stalled endpoint ring");
2943	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2944	/* We need to move the HW's dequeue pointer past this TD,
2945	 * or it will attempt to resend it on the next doorbell ring.
2946	 */
2947	xhci_find_new_dequeue_state(xhci, udev->slot_id,
2948			ep_index, ep->stopped_stream, td, &deq_state);
2949
2950	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2951		return;
2952
2953	/* HW with the reset endpoint quirk will use the saved dequeue state to
2954	 * issue a configure endpoint command later.
2955	 */
2956	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2957		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2958				"Queueing new dequeue state");
2959		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2960				ep_index, ep->stopped_stream, &deq_state);
2961	} else {
2962		/* Better hope no one uses the input context between now and the
2963		 * reset endpoint completion!
2964		 * XXX: No idea how this hardware will react when stream rings
2965		 * are enabled.
2966		 */
2967		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2968				"Setting up input context for "
2969				"configure endpoint command");
2970		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2971				ep_index, &deq_state);
2972	}
2973}
2974
2975/* Called when clearing a halted device. The core should have sent the control
2976 * message to clear the device halt condition. The host side of the halt should
2977 * already be cleared with a reset endpoint command issued when the STALL tx
2978 * event was received.
2979 *
2980 * Context: in_interrupt
2981 */
2982
2983void xhci_endpoint_reset(struct usb_hcd *hcd,
2984		struct usb_host_endpoint *ep)
2985{
2986	struct xhci_hcd *xhci;
2987
2988	xhci = hcd_to_xhci(hcd);
2989
2990	/*
2991	 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2992	 * The Reset Endpoint Command may only be issued to endpoints in the
2993	 * Halted state. If software wishes to reset the Data Toggle or Sequence
2994	 * Number of an endpoint that isn't in the Halted state, then software
2995	 * may issue a Configure Endpoint Command with the Drop and Add bits set
2996	 * for the target endpoint that is in the Stopped state.
2997	 */
2998
2999	/* For now just print debug to follow the situation */
3000	xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
3001		 ep->desc.bEndpointAddress);
3002}
3003
3004static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3005		struct usb_device *udev, struct usb_host_endpoint *ep,
3006		unsigned int slot_id)
3007{
3008	int ret;
3009	unsigned int ep_index;
3010	unsigned int ep_state;
3011
3012	if (!ep)
3013		return -EINVAL;
3014	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3015	if (ret <= 0)
3016		return -EINVAL;
3017	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3018		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3019				" descriptor for ep 0x%x does not support streams\n",
3020				ep->desc.bEndpointAddress);
3021		return -EINVAL;
3022	}
3023
3024	ep_index = xhci_get_endpoint_index(&ep->desc);
3025	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3026	if (ep_state & EP_HAS_STREAMS ||
3027			ep_state & EP_GETTING_STREAMS) {
3028		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3029				"already has streams set up.\n",
3030				ep->desc.bEndpointAddress);
3031		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3032				"dynamic stream context array reallocation.\n");
3033		return -EINVAL;
3034	}
3035	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3036		xhci_warn(xhci, "Cannot set up streams for SuperSpeed bulk "
3037				"endpoint 0x%x; URBs are pending.\n",
3038				ep->desc.bEndpointAddress);
3039		return -EINVAL;
3040	}
3041	return 0;
3042}
3043
3044static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3045		unsigned int *num_streams, unsigned int *num_stream_ctxs)
3046{
3047	unsigned int max_streams;
3048
3049	/* The stream context array size must be a power of two */
3050	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
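	/* For example, a request for 5 stream IDs needs an 8-entry array. */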
3051	/*
3052	 * Find out how many primary stream array entries the host controller
3053	 * supports.  Later we may use secondary stream arrays (similar to 2nd
3054	 * level page entries), but that's an optional feature for xHCI host
3055	 * controllers. xHCs must support at least 4 stream IDs.
3056	 */
3057	max_streams = HCC_MAX_PSA(xhci->hcc_params);
3058	if (*num_stream_ctxs > max_streams) {
3059		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3060				max_streams);
3061		*num_stream_ctxs = max_streams;
3062		*num_streams = max_streams;
3063	}
3064}
3065
3066/* Returns an error code if one of the endpoints already has streams.
3067 * This does not change any data structures, it only checks and gathers
3068 * information.
3069 */
3070static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3071		struct usb_device *udev,
3072		struct usb_host_endpoint **eps, unsigned int num_eps,
3073		unsigned int *num_streams, u32 *changed_ep_bitmask)
3074{
3075	unsigned int max_streams;
3076	unsigned int endpoint_flag;
3077	int i;
3078	int ret;
3079
3080	for (i = 0; i < num_eps; i++) {
3081		ret = xhci_check_streams_endpoint(xhci, udev,
3082				eps[i], udev->slot_id);
3083		if (ret < 0)
3084			return ret;
3085
3086		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3087		if (max_streams < (*num_streams - 1)) {
3088			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3089					eps[i]->desc.bEndpointAddress,
3090					max_streams);
3091			*num_streams = max_streams+1;
3092		}
3093
3094		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3095		if (*changed_ep_bitmask & endpoint_flag)
3096			return -EINVAL;
3097		*changed_ep_bitmask |= endpoint_flag;
3098	}
3099	return 0;
3100}
3101
3102static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3103		struct usb_device *udev,
3104		struct usb_host_endpoint **eps, unsigned int num_eps)
3105{
3106	u32 changed_ep_bitmask = 0;
3107	unsigned int slot_id;
3108	unsigned int ep_index;
3109	unsigned int ep_state;
3110	int i;
3111
3112	slot_id = udev->slot_id;
3113	if (!xhci->devs[slot_id])
3114		return 0;
3115
3116	for (i = 0; i < num_eps; i++) {
3117		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3118		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3119		/* Are streams already being freed for the endpoint? */
3120		if (ep_state & EP_GETTING_NO_STREAMS) {
3121			xhci_warn(xhci, "WARN Can't disable streams for "
3122					"endpoint 0x%x, "
3123					"streams are being disabled already\n",
3124					eps[i]->desc.bEndpointAddress);
3125			return 0;
3126		}
3127		/* Are there actually any streams to free? */
3128		if (!(ep_state & EP_HAS_STREAMS) &&
3129				!(ep_state & EP_GETTING_STREAMS)) {
3130			xhci_warn(xhci, "WARN Can't disable streams for "
3131					"endpoint 0x%x, "
3132					"streams are already disabled!\n",
3133					eps[i]->desc.bEndpointAddress);
3134			xhci_warn(xhci, "WARN xhci_free_streams() called "
3135					"with non-streams endpoint\n");
3136			return 0;
3137		}
3138		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3139	}
3140	return changed_ep_bitmask;
3141}
3142
3143/*
3144 * The USB device drivers use this function (through the HCD interface in USB
3145 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3146 * coordinate mass storage command queueing across multiple endpoints (basically
3147 * a stream ID == a task ID).
3148 *
3149 * Setting up streams involves allocating the same size stream context array
3150 * for each endpoint and issuing a configure endpoint command for all endpoints.
3151 *
3152 * Don't allow the call to succeed if one endpoint only supports one stream
3153 * (which means it doesn't support streams at all).
3154 *
3155 * Drivers may get fewer stream IDs than they asked for, if the host controller
3156 * hardware or endpoints claim they can't support the number of requested
3157 * stream IDs.
3158 */
3159int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3160		struct usb_host_endpoint **eps, unsigned int num_eps,
3161		unsigned int num_streams, gfp_t mem_flags)
3162{
3163	int i, ret;
3164	struct xhci_hcd *xhci;
3165	struct xhci_virt_device *vdev;
3166	struct xhci_command *config_cmd;
3167	struct xhci_input_control_ctx *ctrl_ctx;
3168	unsigned int ep_index;
3169	unsigned int num_stream_ctxs;
3170	unsigned long flags;
3171	u32 changed_ep_bitmask = 0;
3172
3173	if (!eps)
3174		return -EINVAL;
3175
3176	/* Add one to the number of streams requested to account for
3177	 * stream 0 that is reserved for xHCI usage.
3178	 */
3179	num_streams += 1;
3180	xhci = hcd_to_xhci(hcd);
3181	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3182			num_streams);
3183
3184	/* MaxPSASize value 0 (2 streams) means streams are not supported */
3185	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3186			HCC_MAX_PSA(xhci->hcc_params) < 4) {
3187		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3188		return -ENOSYS;
3189	}
3190
3191	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3192	if (!config_cmd) {
3193		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3194		return -ENOMEM;
3195	}
3196	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3197	if (!ctrl_ctx) {
3198		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3199				__func__);
3200		xhci_free_command(xhci, config_cmd);
3201		return -ENOMEM;
3202	}
3203
3204	/* Check to make sure none of the endpoints are already configured for
3205	 * streams.  While we're at it, find the maximum number of streams that
3206	 * all the endpoints will support and check for duplicate endpoints.
3207	 */
3208	spin_lock_irqsave(&xhci->lock, flags);
3209	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3210			num_eps, &num_streams, &changed_ep_bitmask);
3211	if (ret < 0) {
3212		xhci_free_command(xhci, config_cmd);
3213		spin_unlock_irqrestore(&xhci->lock, flags);
3214		return ret;
3215	}
3216	if (num_streams <= 1) {
3217		xhci_warn(xhci, "WARN: endpoints can't handle "
3218				"more than one stream.\n");
3219		xhci_free_command(xhci, config_cmd);
3220		spin_unlock_irqrestore(&xhci->lock, flags);
3221		return -EINVAL;
3222	}
3223	vdev = xhci->devs[udev->slot_id];
3224	/* Mark each endpoint as being in transition, so
3225	 * xhci_urb_enqueue() will reject all URBs.
3226	 */
3227	for (i = 0; i < num_eps; i++) {
3228		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3229		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3230	}
3231	spin_unlock_irqrestore(&xhci->lock, flags);
3232
3233	/* Setup internal data structures and allocate HW data structures for
3234	 * streams (but don't install the HW structures in the input context
3235	 * until we're sure all memory allocation succeeded).
3236	 */
3237	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3238	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3239			num_stream_ctxs, num_streams);
3240
3241	for (i = 0; i < num_eps; i++) {
3242		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3243		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3244				num_stream_ctxs,
3245				num_streams, mem_flags);
3246		if (!vdev->eps[ep_index].stream_info)
3247			goto cleanup;
3248		/* Set maxPstreams in endpoint context and update deq ptr to
3249		 * point to stream context array. FIXME
3250		 */
3251	}
3252
3253	/* Set up the input context for a configure endpoint command. */
3254	for (i = 0; i < num_eps; i++) {
3255		struct xhci_ep_ctx *ep_ctx;
3256
3257		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3258		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3259
3260		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3261				vdev->out_ctx, ep_index);
3262		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3263				vdev->eps[ep_index].stream_info);
3264	}
3265	/* Tell the HW to drop its old copy of the endpoint context info
3266	 * and add the updated copy from the input context.
3267	 */
3268	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3269			vdev->out_ctx, ctrl_ctx,
3270			changed_ep_bitmask, changed_ep_bitmask);
3271
3272	/* Issue and wait for the configure endpoint command */
3273	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3274			false, false);
3275
3276	/* xHC rejected the configure endpoint command for some reason, so we
3277	 * leave the old ring intact and free our internal streams data
3278	 * structure.
3279	 */
3280	if (ret < 0)
3281		goto cleanup;
3282
3283	spin_lock_irqsave(&xhci->lock, flags);
3284	for (i = 0; i < num_eps; i++) {
3285		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3286		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3287		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3288			 udev->slot_id, ep_index);
3289		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3290	}
3291	xhci_free_command(xhci, config_cmd);
3292	spin_unlock_irqrestore(&xhci->lock, flags);
3293
3294	/* Subtract 1 for stream 0, which drivers can't use */
3295	return num_streams - 1;
3296
3297cleanup:
3298	/* If it didn't work, free the streams! */
3299	for (i = 0; i < num_eps; i++) {
3300		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3301		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3302		vdev->eps[ep_index].stream_info = NULL;
3303		/* FIXME Unset maxPstreams in endpoint context and
3304		 * update deq ptr to point to the normal endpoint ring.
3305		 */
3306		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3307		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3308		xhci_endpoint_zero(xhci, vdev, eps[i]);
3309	}
3310	xhci_free_command(xhci, config_cmd);
3311	return -ENOMEM;
3312}
3313
3314/* Transition the endpoint from using streams to being a "normal" endpoint
3315 * without streams.
3316 *
3317 * Modify the endpoint context state, submit a configure endpoint command,
3318 * and free all endpoint rings for streams if that completes successfully.
3319 */
3320int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3321		struct usb_host_endpoint **eps, unsigned int num_eps,
3322		gfp_t mem_flags)
3323{
3324	int i, ret;
3325	struct xhci_hcd *xhci;
3326	struct xhci_virt_device *vdev;
3327	struct xhci_command *command;
3328	struct xhci_input_control_ctx *ctrl_ctx;
3329	unsigned int ep_index;
3330	unsigned long flags;
3331	u32 changed_ep_bitmask;
3332
3333	xhci = hcd_to_xhci(hcd);
3334	vdev = xhci->devs[udev->slot_id];
3335
3336	/* Set up a configure endpoint command to remove the streams rings */
3337	spin_lock_irqsave(&xhci->lock, flags);
3338	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3339			udev, eps, num_eps);
3340	if (changed_ep_bitmask == 0) {
3341		spin_unlock_irqrestore(&xhci->lock, flags);
3342		return -EINVAL;
3343	}
3344
3345	/* Use the xhci_command structure from the first endpoint.  We may have
3346	 * allocated too many, but the driver may call xhci_free_streams() for
3347	 * each endpoint it grouped into one call to xhci_alloc_streams().
3348	 */
3349	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3350	command = vdev->eps[ep_index].stream_info->free_streams_command;
3351	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3352	if (!ctrl_ctx) {
3353		spin_unlock_irqrestore(&xhci->lock, flags);
3354		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3355				__func__);
3356		return -EINVAL;
3357	}
3358
3359	for (i = 0; i < num_eps; i++) {
3360		struct xhci_ep_ctx *ep_ctx;
3361
3362		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3363		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3364		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3365			EP_GETTING_NO_STREAMS;
3366
3367		xhci_endpoint_copy(xhci, command->in_ctx,
3368				vdev->out_ctx, ep_index);
3369		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3370				&vdev->eps[ep_index]);
3371	}
3372	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3373			vdev->out_ctx, ctrl_ctx,
3374			changed_ep_bitmask, changed_ep_bitmask);
3375	spin_unlock_irqrestore(&xhci->lock, flags);
3376
3377	/* Issue and wait for the configure endpoint command,
3378	 * which must succeed.
3379	 */
3380	ret = xhci_configure_endpoint(xhci, udev, command,
3381			false, true);
3382
3383	/* xHC rejected the configure endpoint command for some reason, so we
3384	 * leave the streams rings intact.
3385	 */
3386	if (ret < 0)
3387		return ret;
3388
3389	spin_lock_irqsave(&xhci->lock, flags);
3390	for (i = 0; i < num_eps; i++) {
3391		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3392		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3393		vdev->eps[ep_index].stream_info = NULL;
3394		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal endpoint ring.
3396		 */
3397		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3398		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3399	}
3400	spin_unlock_irqrestore(&xhci->lock, flags);
3401
3402	return 0;
3403}
3404
3405/*
3406 * Deletes endpoint resources for endpoints that were active before a Reset
3407 * Device command, or a Disable Slot command.  The Reset Device command leaves
3408 * the control endpoint intact, whereas the Disable Slot command deletes it.
3409 *
3410 * Must be called with xhci->lock held.
3411 */
3412void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3413	struct xhci_virt_device *virt_dev, bool drop_control_ep)
3414{
3415	int i;
3416	unsigned int num_dropped_eps = 0;
3417	unsigned int drop_flags = 0;
3418
3419	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3420		if (virt_dev->eps[i].ring) {
3421			drop_flags |= 1 << i;
3422			num_dropped_eps++;
3423		}
3424	}
3425	xhci->num_active_eps -= num_dropped_eps;
3426	if (num_dropped_eps)
3427		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3428				"Dropped %u ep ctxs, flags = 0x%x, "
3429				"%u now active.",
3430				num_dropped_eps, drop_flags,
3431				xhci->num_active_eps);
3432}
3433
3434/*
3435 * This submits a Reset Device Command, which will set the device state to 0,
3436 * set the device address to 0, and disable all the endpoints except the default
3437 * control endpoint.  The USB core should come back and call
3438 * xhci_address_device(), and then re-set up the configuration.  If this is
3439 * called because of a usb_reset_and_verify_device(), then the old alternate
3440 * settings will be re-installed through the normal bandwidth allocation
3441 * functions.
3442 *
3443 * Wait for the Reset Device command to finish.  Remove all structures
3444 * associated with the endpoints that were disabled.  Clear the input device
3445 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
3446 *
3447 * If the virt_dev to be reset does not exist or does not match the udev,
3448 * it means the device is lost, possibly due to the xHC restore error and
3449 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3450 * re-allocate the device.
3451 */
3452int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3453{
3454	int ret, i;
3455	unsigned long flags;
3456	struct xhci_hcd *xhci;
3457	unsigned int slot_id;
3458	struct xhci_virt_device *virt_dev;
3459	struct xhci_command *reset_device_cmd;
3460	int last_freed_endpoint;
3461	struct xhci_slot_ctx *slot_ctx;
3462	int old_active_eps = 0;
3463
3464	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3465	if (ret <= 0)
3466		return ret;
3467	xhci = hcd_to_xhci(hcd);
3468	slot_id = udev->slot_id;
3469	virt_dev = xhci->devs[slot_id];
3470	if (!virt_dev) {
3471		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3472				"not exist. Re-allocate the device\n", slot_id);
3473		ret = xhci_alloc_dev(hcd, udev);
3474		if (ret == 1)
3475			return 0;
3476		else
3477			return -EINVAL;
3478	}
3479
3480	if (virt_dev->tt_info)
3481		old_active_eps = virt_dev->tt_info->active_eps;
3482
3483	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
3485		 * may belong to another udev.
3486		 * Re-allocate the device.
3487		 */
3488		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3489				"not match the udev. Re-allocate the device\n",
3490				slot_id);
3491		ret = xhci_alloc_dev(hcd, udev);
3492		if (ret == 1)
3493			return 0;
3494		else
3495			return -EINVAL;
3496	}
3497
3498	/* If device is not setup, there is no point in resetting it */
3499	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3500	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3501						SLOT_STATE_DISABLED)
3502		return 0;
3503
3504	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3505	/* Allocate the command structure that holds the struct completion.
3506	 * Assume we're in process context, since the normal device reset
3507	 * process has to wait for the device anyway.  Storage devices are
3508	 * reset as part of error handling, so use GFP_NOIO instead of
3509	 * GFP_KERNEL.
3510	 */
3511	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3512	if (!reset_device_cmd) {
3513		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3514		return -ENOMEM;
3515	}
3516
3517	/* Attempt to submit the Reset Device command to the command ring */
3518	spin_lock_irqsave(&xhci->lock, flags);
3519
3520	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3521	if (ret) {
3522		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3523		spin_unlock_irqrestore(&xhci->lock, flags);
3524		goto command_cleanup;
3525	}
3526	xhci_ring_cmd_db(xhci);
3527	spin_unlock_irqrestore(&xhci->lock, flags);
3528
3529	/* Wait for the Reset Device command to finish */
3530	wait_for_completion(reset_device_cmd->completion);
3531
3532	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3533	 * unless we tried to reset a slot ID that wasn't enabled,
3534	 * or the device wasn't in the addressed or configured state.
3535	 */
3536	ret = reset_device_cmd->status;
3537	switch (ret) {
3538	case COMP_CMD_ABORT:
3539	case COMP_CMD_STOP:
3540		xhci_warn(xhci, "Timeout waiting for reset device command\n");
3541		ret = -ETIME;
3542		goto command_cleanup;
3543	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3544	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3545		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3546				slot_id,
3547				xhci_get_slot_state(xhci, virt_dev->out_ctx));
3548		xhci_dbg(xhci, "Not freeing device rings.\n");
3549		/* Don't treat this as an error.  May change my mind later. */
3550		ret = 0;
3551		goto command_cleanup;
3552	case COMP_SUCCESS:
3553		xhci_dbg(xhci, "Successful reset device command.\n");
3554		break;
3555	default:
3556		if (xhci_is_vendor_info_code(xhci, ret))
3557			break;
3558		xhci_warn(xhci, "Unknown completion code %u for "
3559				"reset device command.\n", ret);
3560		ret = -EINVAL;
3561		goto command_cleanup;
3562	}
3563
3564	/* Free up host controller endpoint resources */
3565	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3566		spin_lock_irqsave(&xhci->lock, flags);
3567		/* Don't delete the default control endpoint resources */
3568		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3569		spin_unlock_irqrestore(&xhci->lock, flags);
3570	}
3571
3572	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
3573	last_freed_endpoint = 1;
3574	for (i = 1; i < 31; ++i) {
3575		struct xhci_virt_ep *ep = &virt_dev->eps[i];
3576
3577		if (ep->ep_state & EP_HAS_STREAMS) {
3578			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3579					xhci_get_endpoint_address(i));
3580			xhci_free_stream_info(xhci, ep->stream_info);
3581			ep->stream_info = NULL;
3582			ep->ep_state &= ~EP_HAS_STREAMS;
3583		}
3584
3585		if (ep->ring) {
3586			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3587			last_freed_endpoint = i;
3588		}
3589		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3590			xhci_drop_ep_from_interval_table(xhci,
3591					&virt_dev->eps[i].bw_info,
3592					virt_dev->bw_table,
3593					udev,
3594					&virt_dev->eps[i],
3595					virt_dev->tt_info);
3596		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3597	}
3598	/* If necessary, update the number of active TTs on this root port */
3599	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3600
3601	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3602	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3603	ret = 0;
3604
3605command_cleanup:
3606	xhci_free_command(xhci, reset_device_cmd);
3607	return ret;
3608}
3609
3610/*
3611 * At this point, the struct usb_device is about to go away, the device has
3612 * disconnected, and all traffic has been stopped and the endpoints have been
3613 * disabled.  Free any HC data structures associated with that device.
3614 */
3615void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3616{
3617	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3618	struct xhci_virt_device *virt_dev;
3619	unsigned long flags;
3620	u32 state;
3621	int i, ret;
3622	struct xhci_command *command;
3623
3624	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3625	if (!command)
3626		return;
3627
3628#ifndef CONFIG_USB_DEFAULT_PERSIST
3629	/*
3630	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow the controller to runtime suspend
3632	 * if no devices remain.
3633	 */
3634	if (xhci->quirks & XHCI_RESET_ON_RESUME)
3635		pm_runtime_put_noidle(hcd->self.controller);
3636#endif
3637
3638	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3639	/* If the host is halted due to driver unload, we still need to free the
3640	 * device.
3641	 */
3642	if (ret <= 0 && ret != -ENODEV) {
3643		kfree(command);
3644		return;
3645	}
3646
3647	virt_dev = xhci->devs[udev->slot_id];
3648
3649	/* Stop any wayward timer functions (which may grab the lock) */
3650	for (i = 0; i < 31; ++i) {
3651		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3652		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3653	}
3654
3655	spin_lock_irqsave(&xhci->lock, flags);
3656	/* Don't disable the slot if the host controller is dead. */
3657	state = readl(&xhci->op_regs->status);
3658	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3659			(xhci->xhc_state & XHCI_STATE_HALTED)) {
3660		xhci_free_virt_device(xhci, udev->slot_id);
3661		spin_unlock_irqrestore(&xhci->lock, flags);
3662		kfree(command);
3663		return;
3664	}
3665
3666	if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3667				    udev->slot_id)) {
3668		spin_unlock_irqrestore(&xhci->lock, flags);
3669		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3670		return;
3671	}
3672	xhci_ring_cmd_db(xhci);
3673	spin_unlock_irqrestore(&xhci->lock, flags);
3674
3675	/*
3676	 * Event command completion handler will free any data structures
3677	 * associated with the slot.  XXX Can free sleep?
3678	 */
3679}
3680
3681/*
3682 * Checks if we have enough host controller resources for the default control
3683 * endpoint.
3684 *
3685 * Must be called with xhci->lock held.
3686 */
3687static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3688{
3689	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3690		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3691				"Not enough ep ctxs: "
3692				"%u active, need to add 1, limit is %u.",
3693				xhci->num_active_eps, xhci->limit_active_eps);
3694		return -ENOMEM;
3695	}
3696	xhci->num_active_eps += 1;
3697	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3698			"Adding 1 ep ctx, %u now active.",
3699			xhci->num_active_eps);
3700	return 0;
3701}
3702
3703
3704/*
3705 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3706 * timed out, or allocating memory failed.  Returns 1 on success.
3707 */
3708int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3709{
3710	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3711	unsigned long flags;
3712	int ret, slot_id;
3713	struct xhci_command *command;
3714
3715	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3716	if (!command)
3717		return 0;
3718
3719	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
3720	mutex_lock(&xhci->mutex);
3721	spin_lock_irqsave(&xhci->lock, flags);
3722	command->completion = &xhci->addr_dev;
3723	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3724	if (ret) {
3725		spin_unlock_irqrestore(&xhci->lock, flags);
3726		mutex_unlock(&xhci->mutex);
3727		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3728		kfree(command);
3729		return 0;
3730	}
3731	xhci_ring_cmd_db(xhci);
3732	spin_unlock_irqrestore(&xhci->lock, flags);
3733
3734	wait_for_completion(command->completion);
3735	slot_id = xhci->slot_id;
3736	mutex_unlock(&xhci->mutex);
3737
3738	if (!slot_id || command->status != COMP_SUCCESS) {
3739		xhci_err(xhci, "Error while assigning device slot ID\n");
3740		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3741				HCS_MAX_SLOTS(
3742					readl(&xhci->cap_regs->hcs_params1)));
3743		kfree(command);
3744		return 0;
3745	}
3746
3747	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3748		spin_lock_irqsave(&xhci->lock, flags);
3749		ret = xhci_reserve_host_control_ep_resources(xhci);
3750		if (ret) {
3751			spin_unlock_irqrestore(&xhci->lock, flags);
3752			xhci_warn(xhci, "Not enough host resources, "
3753					"active endpoint contexts = %u\n",
3754					xhci->num_active_eps);
3755			goto disable_slot;
3756		}
3757		spin_unlock_irqrestore(&xhci->lock, flags);
3758	}
3759	/* Use GFP_NOIO, since this function can be called from
3760	 * xhci_discover_or_reset_device(), which may be called as part of
3761	 * mass storage driver error handling.
3762	 */
3763	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3764		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3765		goto disable_slot;
3766	}
3767	udev->slot_id = slot_id;
3768
3769#ifndef CONFIG_USB_DEFAULT_PERSIST
3770	/*
3771	 * If resetting upon resume, we can't put the controller into runtime
3772	 * suspend if there is a device attached.
3773	 */
3774	if (xhci->quirks & XHCI_RESET_ON_RESUME)
3775		pm_runtime_get_noresume(hcd->self.controller);
3776#endif
3777
3778
3779	kfree(command);
3780	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
3782	return 1;
3783
3784disable_slot:
3785	/* Disable slot, if we can do it without mem alloc */
3786	spin_lock_irqsave(&xhci->lock, flags);
3787	command->completion = NULL;
3788	command->status = 0;
3789	if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3790				     udev->slot_id))
3791		xhci_ring_cmd_db(xhci);
3792	spin_unlock_irqrestore(&xhci->lock, flags);
3793	return 0;
3794}
3795
3796/*
3797 * Issue an Address Device command and optionally send a corresponding
3798 * SetAddress request to the device.
3799 * We should be protected by the usb_address0_mutex in hub_wq's hub_port_init,
 * so we should only issue and wait on one address command at a time.
3801 */
3802static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3803			     enum xhci_setup_dev setup)
3804{
3805	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3806	unsigned long flags;
3807	struct xhci_virt_device *virt_dev;
3808	int ret = 0;
3809	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3810	struct xhci_slot_ctx *slot_ctx;
3811	struct xhci_input_control_ctx *ctrl_ctx;
3812	u64 temp_64;
3813	struct xhci_command *command = NULL;
3814
3815	mutex_lock(&xhci->mutex);
3816
3817	if (xhci->xhc_state)	/* dying, removing or halted */
3818		goto out;
3819
3820	if (!udev->slot_id) {
3821		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3822				"Bad Slot ID %d", udev->slot_id);
3823		ret = -EINVAL;
3824		goto out;
3825	}
3826
3827	virt_dev = xhci->devs[udev->slot_id];
3828
3829	if (WARN_ON(!virt_dev)) {
3830		/*
		 * In a plug/unplug torture test with an NEC controller,
		 * a NULL pointer dereference was once observed because
		 * virt_dev was NULL.  Print useful debug info rather than
		 * crash if it is observed again!
3834		 */
3835		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3836			udev->slot_id);
3837		ret = -EINVAL;
3838		goto out;
3839	}
3840
3841	if (setup == SETUP_CONTEXT_ONLY) {
3842		slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3843		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3844		    SLOT_STATE_DEFAULT) {
3845			xhci_dbg(xhci, "Slot already in default state\n");
3846			goto out;
3847		}
3848	}
3849
3850	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3851	if (!command) {
3852		ret = -ENOMEM;
3853		goto out;
3854	}
3855
3856	command->in_ctx = virt_dev->in_ctx;
3857	command->completion = &xhci->addr_dev;
3858
3859	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3860	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3861	if (!ctrl_ctx) {
3862		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3863				__func__);
3864		ret = -EINVAL;
3865		goto out;
3866	}
3867	/*
3868	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
3870	 * then set up the slot context.
3871	 */
3872	if (!slot_ctx->dev_info)
3873		xhci_setup_addressable_virt_dev(xhci, udev);
3874	/* Otherwise, update the control endpoint ring enqueue pointer. */
3875	else
3876		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3877	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3878	ctrl_ctx->drop_flags = 0;
3879
3880	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3881	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3882	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3883				le32_to_cpu(slot_ctx->dev_info) >> 27);
3884
3885	spin_lock_irqsave(&xhci->lock, flags);
3886	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3887					udev->slot_id, setup);
3888	if (ret) {
3889		spin_unlock_irqrestore(&xhci->lock, flags);
3890		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3891				"FIXME: allocate a command ring segment");
3892		goto out;
3893	}
3894	xhci_ring_cmd_db(xhci);
3895	spin_unlock_irqrestore(&xhci->lock, flags);
3896
3897	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3898	wait_for_completion(command->completion);
3899
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() 'recovery interval' required by USB and aborting the
	 * command on a timeout."
3903	 */
3904	switch (command->status) {
3905	case COMP_CMD_ABORT:
3906	case COMP_CMD_STOP:
3907		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3908		ret = -ETIME;
3909		break;
3910	case COMP_CTX_STATE:
3911	case COMP_EBADSLT:
3912		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3913			 act, udev->slot_id);
3914		ret = -EINVAL;
3915		break;
3916	case COMP_TX_ERR:
3917		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3918		ret = -EPROTO;
3919		break;
3920	case COMP_DEV_ERR:
3921		dev_warn(&udev->dev,
3922			 "ERROR: Incompatible device for setup %s command\n", act);
3923		ret = -ENODEV;
3924		break;
3925	case COMP_SUCCESS:
3926		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3927			       "Successful setup %s command", act);
3928		break;
3929	default:
3930		xhci_err(xhci,
3931			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3932			 act, command->status);
3933		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3934		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3935		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3936		ret = -EINVAL;
3937		break;
3938	}
3939	if (ret)
3940		goto out;
3941	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3942	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3943			"Op regs DCBAA ptr = %#016llx", temp_64);
3944	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3945		"Slot ID %d dcbaa entry @%p = %#016llx",
3946		udev->slot_id,
3947		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3948		(unsigned long long)
3949		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3950	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3951			"Output Context DMA address = %#08llx",
3952			(unsigned long long)virt_dev->out_ctx->dma);
3953	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3954	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3955	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3956				le32_to_cpu(slot_ctx->dev_info) >> 27);
3957	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3958	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3959	/*
3960	 * USB core uses address 1 for the roothubs, so we add one to the
3961	 * address given back to us by the HC.
3962	 */
3963	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3964	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3965				le32_to_cpu(slot_ctx->dev_info) >> 27);
3966	/* Zero the input context control for later use */
3967	ctrl_ctx->add_flags = 0;
3968	ctrl_ctx->drop_flags = 0;
3969
3970	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3971		       "Internal device address = %d",
3972		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3973out:
3974	mutex_unlock(&xhci->mutex);
3975	kfree(command);
3976	return ret;
3977}
3978
3979int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3980{
3981	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3982}
3983
3984int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3985{
3986	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3987}
3988
3989/*
 * Translate the port index into the real index in the HW port status
 * registers.  Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index.  Raw port numbers are 1-based.
3994 */
3995int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3996{
3997	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3998	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3999	__le32 __iomem *addr;
4000	int raw_port;
4001
4002	if (hcd->speed != HCD_USB3)
4003		addr = xhci->usb2_ports[port1 - 1];
4004	else
4005		addr = xhci->usb3_ports[port1 - 1];
4006
4007	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
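	/*
	 * For example, assuming NUM_PORT_REGS is 4 (PORTSC, PORTPMSC, PORTLI
	 * and a reserved register), a PORTSC address 8 registers past the
	 * port status base maps to raw port 8/4 + 1 = 3.
	 */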
4008	return raw_port;
4009}
4010
4011/*
4012 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4013 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
4014 */
4015static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4016			struct usb_device *udev, u16 max_exit_latency)
4017{
4018	struct xhci_virt_device *virt_dev;
4019	struct xhci_command *command;
4020	struct xhci_input_control_ctx *ctrl_ctx;
4021	struct xhci_slot_ctx *slot_ctx;
4022	unsigned long flags;
4023	int ret;
4024
4025	spin_lock_irqsave(&xhci->lock, flags);
4026
4027	virt_dev = xhci->devs[udev->slot_id];
4028
4029	/*
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
	 * and was re-initialized.  The exit latency will be set later, after
	 * hub_port_finish_reset() is done and xhci->devs[] is re-allocated.
4033	 */
4034
4035	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4036		spin_unlock_irqrestore(&xhci->lock, flags);
4037		return 0;
4038	}
4039
4040	/* Attempt to issue an Evaluate Context command to change the MEL. */
4041	command = xhci->lpm_command;
4042	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4043	if (!ctrl_ctx) {
4044		spin_unlock_irqrestore(&xhci->lock, flags);
4045		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4046				__func__);
4047		return -ENOMEM;
4048	}
4049
4050	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4051	spin_unlock_irqrestore(&xhci->lock, flags);
4052
4053	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4054	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4055	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4056	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4057	slot_ctx->dev_state = 0;
4058
4059	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4060			"Set up evaluate context for LPM MEL change.");
4061	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4062	xhci_dbg_ctx(xhci, command->in_ctx, 0);
4063
4064	/* Issue and wait for the evaluate context command. */
4065	ret = xhci_configure_endpoint(xhci, udev, command,
4066			true, true);
4067	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4068	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4069
4070	if (!ret) {
4071		spin_lock_irqsave(&xhci->lock, flags);
4072		virt_dev->current_mel = max_exit_latency;
4073		spin_unlock_irqrestore(&xhci->lock, flags);
4074	}
4075	return ret;
4076}
4077
4078#ifdef CONFIG_PM
4079
4080/* BESL to HIRD Encoding array for USB2 LPM */
4081static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4082	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4083
/* Calculate HIRD/BESL for USB2 PORTPMSC */
4085static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4086					struct usb_device *udev)
4087{
4088	int u2del, besl, besl_host;
4089	int besl_device = 0;
4090	u32 field;
4091
4092	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4093	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4094
4095	if (field & USB_BESL_SUPPORT) {
4096		for (besl_host = 0; besl_host < 16; besl_host++) {
4097			if (xhci_besl_encoding[besl_host] >= u2del)
4098				break;
4099		}
4100		/* Use baseline BESL value as default */
4101		if (field & USB_BESL_BASELINE_VALID)
4102			besl_device = USB_GET_BESL_BASELINE(field);
4103		else if (field & USB_BESL_DEEP_VALID)
4104			besl_device = USB_GET_BESL_DEEP(field);
4105	} else {
4106		if (u2del <= 50)
4107			besl_host = 0;
4108		else
4109			besl_host = (u2del - 51) / 75 + 1;
4110	}
4111
4112	besl = besl_host + besl_device;
4113	if (besl > 15)
4114		besl = 15;
4115
4116	return besl;
4117}
4118
4119/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4120static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4121{
4122	u32 field;
4123	int l1;
4124	int besld = 0;
4125	int hirdm = 0;
4126
4127	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4128
4129	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4130	l1 = udev->l1_params.timeout / 256;
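	/*
	 * For example, an L1 timeout of 512 us encodes as l1 = 2, while a
	 * timeout below 256 us encodes as 0.
	 */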
4131
4132	/* device has preferred BESLD */
4133	if (field & USB_BESL_DEEP_VALID) {
4134		besld = USB_GET_BESL_DEEP(field);
4135		hirdm = 1;
4136	}
4137
4138	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4139}
4140
4141int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4142			struct usb_device *udev, int enable)
4143{
4144	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
4145	__le32 __iomem	**port_array;
4146	__le32 __iomem	*pm_addr, *hlpm_addr;
4147	u32		pm_val, hlpm_val, field;
4148	unsigned int	port_num;
4149	unsigned long	flags;
4150	int		hird, exit_latency;
4151	int		ret;
4152
4153	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
4154			!udev->lpm_capable)
4155		return -EPERM;
4156
4157	if (!udev->parent || udev->parent->parent ||
4158			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4159		return -EPERM;
4160
4161	if (udev->usb2_hw_lpm_capable != 1)
4162		return -EPERM;
4163
4164	spin_lock_irqsave(&xhci->lock, flags);
4165
4166	port_array = xhci->usb2_ports;
4167	port_num = udev->portnum - 1;
4168	pm_addr = port_array[port_num] + PORTPMSC;
4169	pm_val = readl(pm_addr);
4170	hlpm_addr = port_array[port_num] + PORTHLPMC;
4171	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4172
4173	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4174			enable ? "enable" : "disable", port_num + 1);
4175
4176	if (enable) {
4177		/* Host supports BESL timeout instead of HIRD */
4178		if (udev->usb2_hw_lpm_besl_capable) {
			/* If the device doesn't have a preferred BESL value, use a
			 * default one that works with mixed HIRD and BESL
			 * systems.  See the XHCI_DEFAULT_BESL definition in xhci.h.
4182			 */
4183			if ((field & USB_BESL_SUPPORT) &&
4184			    (field & USB_BESL_BASELINE_VALID))
4185				hird = USB_GET_BESL_BASELINE(field);
4186			else
4187				hird = udev->l1_params.besl;
4188
4189			exit_latency = xhci_besl_encoding[hird];
4190			spin_unlock_irqrestore(&xhci->lock, flags);
4191
			/* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx
			 * input context for link power management evaluate
			 * context commands.  It is protected by hcd->bandwidth_mutex
			 * and is shared by all devices.  We need to set the max
			 * exit latency for USB 2 BESL LPM as well, so use the
			 * same mutex and xhci_change_max_exit_latency().
4198			 */
4199			mutex_lock(hcd->bandwidth_mutex);
4200			ret = xhci_change_max_exit_latency(xhci, udev,
4201							   exit_latency);
4202			mutex_unlock(hcd->bandwidth_mutex);
4203
4204			if (ret < 0)
4205				return ret;
4206			spin_lock_irqsave(&xhci->lock, flags);
4207
4208			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4209			writel(hlpm_val, hlpm_addr);
4210			/* flush write */
4211			readl(hlpm_addr);
4212		} else {
4213			hird = xhci_calculate_hird_besl(xhci, udev);
4214		}
4215
4216		pm_val &= ~PORT_HIRD_MASK;
4217		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4218		writel(pm_val, pm_addr);
4219		pm_val = readl(pm_addr);
4220		pm_val |= PORT_HLE;
4221		writel(pm_val, pm_addr);
4222		/* flush write */
4223		readl(pm_addr);
4224	} else {
4225		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4226		writel(pm_val, pm_addr);
4227		/* flush write */
4228		readl(pm_addr);
4229		if (udev->usb2_hw_lpm_besl_capable) {
4230			spin_unlock_irqrestore(&xhci->lock, flags);
4231			mutex_lock(hcd->bandwidth_mutex);
4232			xhci_change_max_exit_latency(xhci, udev, 0);
4233			mutex_unlock(hcd->bandwidth_mutex);
4234			return 0;
4235		}
4236	}
4237
4238	spin_unlock_irqrestore(&xhci->lock, flags);
4239	return 0;
4240}
4241
/* Check whether a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
4245 */
4246static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4247					   unsigned capability)
4248{
4249	u32 port_offset, port_count;
4250	int i;
4251
4252	for (i = 0; i < xhci->num_ext_caps; i++) {
4253		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
4255			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4256			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
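			/*
			 * For example (hypothetical values), an extended
			 * capability advertising port offset 3 and port count
			 * 2 covers cached port indices 2 and 3 after the -1
			 * adjustment above.
			 */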
4257			if (port >= port_offset &&
4258			    port < port_offset + port_count)
4259				return 1;
4260		}
4261	}
4262	return 0;
4263}
4264
4265int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4266{
4267	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
4268	int		portnum = udev->portnum - 1;
4269
4270	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
4271			!udev->lpm_capable)
4272		return 0;
4273
	/* So far we only support LPM for non-hub devices below the root hub */
4275	if (!udev->parent || udev->parent->parent ||
4276			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4277		return 0;
4278
4279	if (xhci->hw_lpm_support == 1 &&
4280			xhci_check_usb2_port_capability(
4281				xhci, portnum, XHCI_HLC)) {
4282		udev->usb2_hw_lpm_capable = 1;
4283		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4284		udev->l1_params.besl = XHCI_DEFAULT_BESL;
4285		if (xhci_check_usb2_port_capability(xhci, portnum,
4286					XHCI_BLC))
4287			udev->usb2_hw_lpm_besl_capable = 1;
4288	}
4289
4290	return 0;
4291}
4292
4293/*---------------------- USB 3.0 Link PM functions ------------------------*/
4294
4295/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4296static unsigned long long xhci_service_interval_to_ns(
4297		struct usb_endpoint_descriptor *desc)
4298{
4299	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4300}
4301
4302static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4303		enum usb3_link_state state)
4304{
4305	unsigned long long sel;
4306	unsigned long long pel;
4307	unsigned int max_sel_pel;
4308	char *state_name;
4309
4310	switch (state) {
4311	case USB3_LPM_U1:
4312		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4313		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4314		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4315		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4316		state_name = "U1";
4317		break;
4318	case USB3_LPM_U2:
4319		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4320		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4321		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4322		state_name = "U2";
4323		break;
4324	default:
4325		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4326				__func__);
4327		return USB3_LPM_DISABLED;
4328	}
4329
4330	if (sel <= max_sel_pel && pel <= max_sel_pel)
4331		return USB3_LPM_DEVICE_INITIATED;
4332
4333	if (sel > max_sel_pel)
4334		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4335				"due to long SEL %llu ms\n",
4336				state_name, sel);
4337	else
4338		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4339				"due to long PEL %llu ms\n",
4340				state_name, pel);
4341	return USB3_LPM_DISABLED;
4342}
4343
4344/* The U1 timeout should be the maximum of the following values:
4345 *  - For control endpoints, U1 system exit latency (SEL) * 3
4346 *  - For bulk endpoints, U1 SEL * 5
4347 *  - For interrupt endpoints:
4348 *    - Notification EPs, U1 SEL * 3
4349 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4350 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4351 */
4352static unsigned long long xhci_calculate_intel_u1_timeout(
4353		struct usb_device *udev,
4354		struct usb_endpoint_descriptor *desc)
4355{
4356	unsigned long long timeout_ns;
4357	int ep_type;
4358	int intr_type;
4359
4360	ep_type = usb_endpoint_type(desc);
4361	switch (ep_type) {
4362	case USB_ENDPOINT_XFER_CONTROL:
4363		timeout_ns = udev->u1_params.sel * 3;
4364		break;
4365	case USB_ENDPOINT_XFER_BULK:
4366		timeout_ns = udev->u1_params.sel * 5;
4367		break;
4368	case USB_ENDPOINT_XFER_INT:
4369		intr_type = usb_endpoint_interrupt_type(desc);
4370		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4371			timeout_ns = udev->u1_params.sel * 3;
4372			break;
4373		}
4374		/* Otherwise the calculation is the same as isoc eps */
4375	case USB_ENDPOINT_XFER_ISOC:
4376		timeout_ns = xhci_service_interval_to_ns(desc);
4377		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4378		if (timeout_ns < udev->u1_params.sel * 2)
4379			timeout_ns = udev->u1_params.sel * 2;
4380		break;
4381	default:
4382		return 0;
4383	}
4384
4385	return timeout_ns;
4386}
4387
4388/* Returns the hub-encoded U1 timeout value. */
4389static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4390		struct usb_device *udev,
4391		struct usb_endpoint_descriptor *desc)
4392{
4393	unsigned long long timeout_ns;
4394
4395	if (xhci->quirks & XHCI_INTEL_HOST)
4396		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4397	else
4398		timeout_ns = udev->u1_params.sel;
4399
4400	/* The U1 timeout is encoded in 1us intervals.
4401	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4402	 */
4403	if (timeout_ns == USB3_LPM_DISABLED)
4404		timeout_ns = 1;
4405	else
4406		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4407
4408	/* If the necessary timeout value is bigger than what we can set in the
4409	 * USB 3.0 hub, we have to disable hub-initiated U1.
4410	 */
4411	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4412		return timeout_ns;
4413	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4414			"due to long timeout %llu ms\n", timeout_ns);
4415	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4416}
4417
4418/* The U2 timeout should be the maximum of:
4419 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
4420 *  - largest bInterval of any active periodic endpoint (to avoid going
4421 *    into lower power link states between intervals).
4422 *  - the U2 Exit Latency of the device
4423 */
4424static unsigned long long xhci_calculate_intel_u2_timeout(
4425		struct usb_device *udev,
4426		struct usb_endpoint_descriptor *desc)
4427{
4428	unsigned long long timeout_ns;
4429	unsigned long long u2_del_ns;
4430
4431	timeout_ns = 10 * 1000 * 1000;
4432
4433	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4434			(xhci_service_interval_to_ns(desc) > timeout_ns))
4435		timeout_ns = xhci_service_interval_to_ns(desc);
4436
4437	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4438	if (u2_del_ns > timeout_ns)
4439		timeout_ns = u2_del_ns;
4440
4441	return timeout_ns;
4442}
4443
4444/* Returns the hub-encoded U2 timeout value. */
4445static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4446		struct usb_device *udev,
4447		struct usb_endpoint_descriptor *desc)
4448{
4449	unsigned long long timeout_ns;
4450
4451	if (xhci->quirks & XHCI_INTEL_HOST)
4452		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4453	else
4454		timeout_ns = udev->u2_params.sel;
4455
4456	/* The U2 timeout is encoded in 256us intervals */
4457	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
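	/*
	 * For example, a 10 ms timeout rounds up to 40 intervals
	 * (40 * 256 us = 10.24 ms).
	 */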
4458	/* If the necessary timeout value is bigger than what we can set in the
4459	 * USB 3.0 hub, we have to disable hub-initiated U2.
4460	 */
4461	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4462		return timeout_ns;
4463	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4464			"due to long timeout %llu ms\n", timeout_ns);
4465	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4466}
4467
4468static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4469		struct usb_device *udev,
4470		struct usb_endpoint_descriptor *desc,
4471		enum usb3_link_state state,
4472		u16 *timeout)
4473{
4474	if (state == USB3_LPM_U1)
4475		return xhci_calculate_u1_timeout(xhci, udev, desc);
4476	else if (state == USB3_LPM_U2)
4477		return xhci_calculate_u2_timeout(xhci, udev, desc);
4478
4479	return USB3_LPM_DISABLED;
4480}
4481
4482static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4483		struct usb_device *udev,
4484		struct usb_endpoint_descriptor *desc,
4485		enum usb3_link_state state,
4486		u16 *timeout)
4487{
4488	u16 alt_timeout;
4489
4490	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4491		desc, state, timeout);
4492
4493	/* If we found we can't enable hub-initiated LPM, or
4494	 * the U1 or U2 exit latency was too high to allow
4495	 * device-initiated LPM as well, just stop searching.
4496	 */
4497	if (alt_timeout == USB3_LPM_DISABLED ||
4498			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4499		*timeout = alt_timeout;
4500		return -E2BIG;
4501	}
4502	if (alt_timeout > *timeout)
4503		*timeout = alt_timeout;
4504	return 0;
4505}
4506
4507static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4508		struct usb_device *udev,
4509		struct usb_host_interface *alt,
4510		enum usb3_link_state state,
4511		u16 *timeout)
4512{
4513	int j;
4514
4515	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4516		if (xhci_update_timeout_for_endpoint(xhci, udev,
4517					&alt->endpoint[j].desc, state, timeout))
4518			return -E2BIG;
4520	}
4521	return 0;
4522}
4523
4524static int xhci_check_intel_tier_policy(struct usb_device *udev,
4525		enum usb3_link_state state)
4526{
4527	struct usb_device *parent;
4528	unsigned int num_hubs;
4529
4530	if (state == USB3_LPM_U2)
4531		return 0;
4532
4533	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4534	for (parent = udev->parent, num_hubs = 0; parent->parent;
4535			parent = parent->parent)
4536		num_hubs++;
4537
4538	if (num_hubs < 2)
4539		return 0;
4540
4541	dev_dbg(&udev->dev, "Disabling U1 link state for device"
4542			" below second-tier hub.\n");
4543	dev_dbg(&udev->dev, "Plug device into first-tier hub "
4544			"to decrease power consumption.\n");
4545	return -E2BIG;
4546}
4547
4548static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4549		struct usb_device *udev,
4550		enum usb3_link_state state)
4551{
4552	if (xhci->quirks & XHCI_INTEL_HOST)
4553		return xhci_check_intel_tier_policy(udev, state);
4554	else
4555		return 0;
4556}
4557
4558/* Returns the U1 or U2 timeout that should be enabled.
4559 * If the tier check or timeout setting functions return with a non-zero exit
4560 * code, that means the timeout value has been finalized and we shouldn't look
4561 * at any more endpoints.
4562 */
4563static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4564			struct usb_device *udev, enum usb3_link_state state)
4565{
4566	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4567	struct usb_host_config *config;
4568	char *state_name;
4569	int i;
4570	u16 timeout = USB3_LPM_DISABLED;
4571
4572	if (state == USB3_LPM_U1)
4573		state_name = "U1";
4574	else if (state == USB3_LPM_U2)
4575		state_name = "U2";
4576	else {
4577		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4578				state);
4579		return timeout;
4580	}
4581
4582	if (xhci_check_tier_policy(xhci, udev, state) < 0)
4583		return timeout;
4584
4585	/* Gather some information about the currently installed configuration
4586	 * and alternate interface settings.
4587	 */
4588	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4589			state, &timeout))
4590		return timeout;
4591
4592	config = udev->actconfig;
4593	if (!config)
4594		return timeout;
4595
4596	for (i = 0; i < config->desc.bNumInterfaces; i++) {
4597		struct usb_driver *driver;
4598		struct usb_interface *intf = config->interface[i];
4599
4600		if (!intf)
4601			continue;
4602
4603		/* Check if any currently bound drivers want hub-initiated LPM
4604		 * disabled.
4605		 */
4606		if (intf->dev.driver) {
4607			driver = to_usb_driver(intf->dev.driver);
4608			if (driver && driver->disable_hub_initiated_lpm) {
4609				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4610						"at request of driver %s\n",
4611						state_name, driver->name);
4612				return xhci_get_timeout_no_hub_lpm(udev, state);
4613			}
4614		}
4615
4616		/* Not sure how this could happen... */
4617		if (!intf->cur_altsetting)
4618			continue;
4619
4620		if (xhci_update_timeout_for_interface(xhci, udev,
4621					intf->cur_altsetting,
4622					state, &timeout))
4623			return timeout;
4624	}
4625	return timeout;
4626}
4627
4628static int calculate_max_exit_latency(struct usb_device *udev,
4629		enum usb3_link_state state_changed,
4630		u16 hub_encoded_timeout)
4631{
4632	unsigned long long u1_mel_us = 0;
4633	unsigned long long u2_mel_us = 0;
4634	unsigned long long mel_us = 0;
4635	bool disabling_u1;
4636	bool disabling_u2;
4637	bool enabling_u1;
4638	bool enabling_u2;
4639
4640	disabling_u1 = (state_changed == USB3_LPM_U1 &&
4641			hub_encoded_timeout == USB3_LPM_DISABLED);
4642	disabling_u2 = (state_changed == USB3_LPM_U2 &&
4643			hub_encoded_timeout == USB3_LPM_DISABLED);
4644
4645	enabling_u1 = (state_changed == USB3_LPM_U1 &&
4646			hub_encoded_timeout != USB3_LPM_DISABLED);
4647	enabling_u2 = (state_changed == USB3_LPM_U2 &&
4648			hub_encoded_timeout != USB3_LPM_DISABLED);
4649
4650	/* If U1 was already enabled and we're not disabling it,
4651	 * or we're going to enable U1, account for the U1 max exit latency.
4652	 */
4653	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4654			enabling_u1)
4655		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4656	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4657			enabling_u2)
4658		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
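	/*
	 * Example with hypothetical values: a U1 MEL of 400 ns rounds up to
	 * 1 us and a U2 MEL of 2000 ns to 2 us, so mel_us below would be 2.
	 */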
4659
4660	if (u1_mel_us > u2_mel_us)
4661		mel_us = u1_mel_us;
4662	else
4663		mel_us = u2_mel_us;
4664	/* xHCI host controller max exit latency field is only 16 bits wide. */
4665	if (mel_us > MAX_EXIT) {
4666		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4667				"is too big.\n", mel_us);
4668		return -E2BIG;
4669	}
4670	return mel_us;
4671}
4672
4673/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4674int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4675			struct usb_device *udev, enum usb3_link_state state)
4676{
4677	struct xhci_hcd	*xhci;
4678	u16 hub_encoded_timeout;
4679	int mel;
4680	int ret;
4681
4682	xhci = hcd_to_xhci(hcd);
4683	/* The LPM timeout values are pretty host-controller specific, so don't
4684	 * enable hub-initiated timeouts unless the vendor has provided
4685	 * information about their timeout algorithm.
4686	 */
4687	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4688			!xhci->devs[udev->slot_id])
4689		return USB3_LPM_DISABLED;
4690
4691	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4692	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4693	if (mel < 0) {
4694		/* Max Exit Latency is too big, disable LPM. */
4695		hub_encoded_timeout = USB3_LPM_DISABLED;
4696		mel = 0;
4697	}
4698
4699	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4700	if (ret)
4701		return ret;
4702	return hub_encoded_timeout;
4703}
4704
4705int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4706			struct usb_device *udev, enum usb3_link_state state)
4707{
4708	struct xhci_hcd	*xhci;
4709	u16 mel;
4710	int ret;
4711
4712	xhci = hcd_to_xhci(hcd);
4713	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4714			!xhci->devs[udev->slot_id])
4715		return 0;
4716
4717	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4718	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4719	if (ret)
4720		return ret;
4721	return 0;
4722}
4723#else /* CONFIG_PM */
4724
4725int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4726				struct usb_device *udev, int enable)
4727{
4728	return 0;
4729}
4730
4731int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4732{
4733	return 0;
4734}
4735
4736int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4737			struct usb_device *udev, enum usb3_link_state state)
4738{
4739	return USB3_LPM_DISABLED;
4740}
4741
4742int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4743			struct usb_device *udev, enum usb3_link_state state)
4744{
4745	return 0;
4746}
4747#endif	/* CONFIG_PM */
4748
4749/*-------------------------------------------------------------------------*/
4750
4751/* Once a hub descriptor is fetched for a device, we need to update the xHC's
4752 * internal data structures for the device.
4753 */
4754int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4755			struct usb_tt *tt, gfp_t mem_flags)
4756{
4757	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4758	struct xhci_virt_device *vdev;
4759	struct xhci_command *config_cmd;
4760	struct xhci_input_control_ctx *ctrl_ctx;
4761	struct xhci_slot_ctx *slot_ctx;
4762	unsigned long flags;
4763	unsigned think_time;
4764	int ret;
4765
4766	/* Ignore root hubs */
4767	if (!hdev->parent)
4768		return 0;
4769
4770	vdev = xhci->devs[hdev->slot_id];
4771	if (!vdev) {
4772		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4773		return -EINVAL;
4774	}
4775	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4776	if (!config_cmd) {
4777		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4778		return -ENOMEM;
4779	}
4780	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
4781	if (!ctrl_ctx) {
4782		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4783				__func__);
4784		xhci_free_command(xhci, config_cmd);
4785		return -ENOMEM;
4786	}
4787
4788	spin_lock_irqsave(&xhci->lock, flags);
4789	if (hdev->speed == USB_SPEED_HIGH &&
4790			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4791		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4792		xhci_free_command(xhci, config_cmd);
4793		spin_unlock_irqrestore(&xhci->lock, flags);
4794		return -ENOMEM;
4795	}
4796
4797	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4798	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4799	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4800	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4801	/*
	 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
	 * but it may already be set to 1 when setting up an xHCI virtual
	 * device, so clear it anyway.
4805	 */
4806	if (tt->multi)
4807		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4808	else if (hdev->speed == USB_SPEED_FULL)
4809		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4810
4811	if (xhci->hci_version > 0x95) {
4812		xhci_dbg(xhci, "xHCI version %x needs hub "
4813				"TT think time and number of ports\n",
4814				(unsigned int) xhci->hci_version);
4815		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4816		/* Set TT think time - convert from ns to FS bit times.
4817		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4818		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
4819		 *
4820		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
4822		 */
4823		think_time = tt->think_time;
4824		if (think_time != 0)
4825			think_time = (think_time / 666) - 1;
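		/*
		 * For example, a hub think time of 666 ns (8 FS bit times)
		 * encodes as 0, and 2664 ns (32 FS bit times) encodes as 3.
		 */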
4826		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4827			slot_ctx->tt_info |=
4828				cpu_to_le32(TT_THINK_TIME(think_time));
4829	} else {
4830		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4831				"TT think time or number of ports\n",
4832				(unsigned int) xhci->hci_version);
4833	}
4834	slot_ctx->dev_state = 0;
4835	spin_unlock_irqrestore(&xhci->lock, flags);
4836
4837	xhci_dbg(xhci, "Set up %s for hub device.\n",
4838			(xhci->hci_version > 0x95) ?
4839			"configure endpoint" : "evaluate context");
4840	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4841	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4842
4843	/* Issue and wait for the configure endpoint or
4844	 * evaluate context command.
4845	 */
4846	if (xhci->hci_version > 0x95)
4847		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4848				false, false);
4849	else
4850		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4851				true, false);
4852
4853	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4854	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4855
4856	xhci_free_command(xhci, config_cmd);
4857	return ret;
4858}
4859
4860int xhci_get_frame(struct usb_hcd *hcd)
4861{
4862	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4863	/* EHCI mods by the periodic size.  Why? */
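	/* MFINDEX counts 125 us microframes; shifting right by 3 gives 1 ms frames. */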
4864	return readl(&xhci->run_regs->microframe_index) >> 3;
4865}
4866
4867int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4868{
4869	struct xhci_hcd		*xhci;
4870	struct device		*dev = hcd->self.controller;
4871	int			retval;
4872
4873	/* Accept arbitrarily long scatter-gather lists */
4874	hcd->self.sg_tablesize = ~0;
4875
	/* support building packets from discontinuous buffers */
4877	hcd->self.no_sg_constraint = 1;
4878
4879	/* XHCI controllers don't stop the ep queue on short packets :| */
4880	hcd->self.no_stop_on_short = 1;
4881
4882	if (usb_hcd_is_primary_hcd(hcd)) {
4883		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4884		if (!xhci)
4885			return -ENOMEM;
4886		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4887		xhci->main_hcd = hcd;
4888		/* Mark the first roothub as being USB 2.0.
4889		 * The xHCI driver will register the USB 3.0 roothub.
4890		 */
4891		hcd->speed = HCD_USB2;
4892		hcd->self.root_hub->speed = USB_SPEED_HIGH;
4893		/*
		 * A USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
4896		 * companion controller.
4897		 */
4898		hcd->has_tt = 1;
4899	} else {
4900		/* xHCI private pointer was set in xhci_pci_probe for the second
4901		 * registered roothub.
4902		 */
4903		return 0;
4904	}
4905
4906	mutex_init(&xhci->mutex);
4907	xhci->cap_regs = hcd->regs;
4908	xhci->op_regs = hcd->regs +
4909		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
4910	xhci->run_regs = hcd->regs +
4911		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4912	/* Cache read-only capability registers */
4913	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
4914	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
4915	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
4916	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
4917	xhci->hci_version = HC_VERSION(xhci->hcc_params);
4918	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
4919	xhci_print_registers(xhci);
4920
4921	xhci->quirks = quirks;
4922
4923	get_quirks(dev, xhci);
4924
	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer.  This quirk causes such
	 * spurious events to be ignored.
4928	 */
4929	if (xhci->hci_version > 0x96)
4930		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4931
4932	/* Make sure the HC is halted. */
4933	retval = xhci_halt(xhci);
4934	if (retval)
4935		goto error;
4936
4937	xhci_dbg(xhci, "Resetting HCD\n");
4938	/* Reset the internal HC memory state and registers. */
4939	retval = xhci_reset(xhci);
4940	if (retval)
4941		goto error;
4942	xhci_dbg(xhci, "Reset complete\n");
4943
4944	/*
4945	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1.  However, those xHCs don't actually
	 * support 64-bit address memory pointers.  So this driver clears the
	 * AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) is used in this xhci_gen_setup().
4950	 */
4951	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4952		xhci->hcc_params &= ~BIT(0);
4953
4954	/* Set dma_mask and coherent_dma_mask to 64-bits,
4955	 * if xHC supports 64-bit addressing */
4956	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4957			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
4958		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4959		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
4960	}
4961
4962	xhci_dbg(xhci, "Calling HCD init\n");
4963	/* Initialize HCD and host controller data structures. */
4964	retval = xhci_init(hcd);
4965	if (retval)
4966		goto error;
4967	xhci_dbg(xhci, "Called HCD init\n");
4968
4969	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
4970		  xhci->hcc_params, xhci->hci_version, xhci->quirks);
4971
4972	return 0;
4973error:
4974	kfree(xhci);
4975	return retval;
4976}
4977EXPORT_SYMBOL_GPL(xhci_gen_setup);
4978
4979static const struct hc_driver xhci_hc_driver = {
4980	.description =		"xhci-hcd",
4981	.product_desc =		"xHCI Host Controller",
4982	.hcd_priv_size =	sizeof(struct xhci_hcd *),
4983
4984	/*
4985	 * generic hardware linkage
4986	 */
4987	.irq =			xhci_irq,
4988	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
4989
4990	/*
4991	 * basic lifecycle operations
4992	 */
4993	.reset =		NULL, /* set in xhci_init_driver() */
4994	.start =		xhci_run,
4995	.stop =			xhci_stop,
4996	.shutdown =		xhci_shutdown,
4997
4998	/*
4999	 * managing i/o requests and associated device resources
5000	 */
5001	.urb_enqueue =		xhci_urb_enqueue,
5002	.urb_dequeue =		xhci_urb_dequeue,
5003	.alloc_dev =		xhci_alloc_dev,
5004	.free_dev =		xhci_free_dev,
5005	.alloc_streams =	xhci_alloc_streams,
5006	.free_streams =		xhci_free_streams,
5007	.add_endpoint =		xhci_add_endpoint,
5008	.drop_endpoint =	xhci_drop_endpoint,
5009	.endpoint_reset =	xhci_endpoint_reset,
5010	.check_bandwidth =	xhci_check_bandwidth,
5011	.reset_bandwidth =	xhci_reset_bandwidth,
5012	.address_device =	xhci_address_device,
5013	.enable_device =	xhci_enable_device,
5014	.update_hub_device =	xhci_update_hub_device,
5015	.reset_device =		xhci_discover_or_reset_device,
5016
5017	/*
5018	 * scheduling support
5019	 */
5020	.get_frame_number =	xhci_get_frame,
5021
5022	/*
5023	 * root hub support
5024	 */
5025	.hub_control =		xhci_hub_control,
5026	.hub_status_data =	xhci_hub_status_data,
5027	.bus_suspend =		xhci_bus_suspend,
5028	.bus_resume =		xhci_bus_resume,
5029
5030	/*
5031	 * call back when device connected and addressed
5032	 */
5033	.update_device =        xhci_update_device,
5034	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
5035	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
5036	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
5037	.find_raw_port_number =	xhci_find_raw_port_number,
5038};
5039
5040void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *))
5041{
5042	BUG_ON(!setup_fn);
5043	*drv = xhci_hc_driver;
5044	drv->reset = setup_fn;
5045}
5046EXPORT_SYMBOL_GPL(xhci_init_driver);
5047
5048MODULE_DESCRIPTION(DRIVER_DESC);
5049MODULE_AUTHOR(DRIVER_AUTHOR);
5050MODULE_LICENSE("GPL");
5051
5052static int __init xhci_hcd_init(void)
5053{
5054	/*
5055	 * Check the compiler generated sizes of structures that must be laid
5056	 * out in specific ways for hardware access.
5057	 */
5058	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5059	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5060	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5061	/* xhci_device_control has eight fields, and also
5062	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5063	 */
5064	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5065	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5066	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5067	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
5068	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5069	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5070	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5071
5072	if (usb_disabled())
5073		return -ENODEV;
5074
5075	return 0;
5076}
5077
5078/*
5079 * If an init function is provided, an exit function must also be provided
5080 * to allow module unload.
5081 */
5082static void __exit xhci_hcd_fini(void) { }
5083
5084module_init(xhci_hcd_init);
5085module_exit(xhci_hcd_fini);
5086