This source file includes the following definitions:
- td_on_ring
- xhci_handshake
- xhci_quiesce
- xhci_halt
- xhci_start
- xhci_reset
- xhci_zero_64b_regs
- xhci_setup_msi
- xhci_setup_msix
- xhci_cleanup_msix
- xhci_msix_sync_irqs
- xhci_try_enable_msi
- xhci_try_enable_msi
- xhci_cleanup_msix
- xhci_msix_sync_irqs
- compliance_mode_recovery
- compliance_mode_recovery_timer_init
- xhci_compliance_mode_recovery_timer_quirk_check
- xhci_all_ports_seen_u0
- xhci_init
- xhci_run_finished
- xhci_run
- xhci_stop
- xhci_shutdown
- xhci_save_registers
- xhci_restore_registers
- xhci_set_cmd_ring_deq
- xhci_clear_command_ring
- xhci_disable_port_wake_on_bits
- xhci_pending_portevent
- xhci_suspend
- xhci_resume
- xhci_map_urb_for_dma
- xhci_get_endpoint_index
- xhci_get_endpoint_address
- xhci_get_endpoint_flag
- xhci_get_endpoint_flag_from_index
- xhci_last_valid_endpoint
- xhci_check_args
- xhci_check_maxpacket
- xhci_urb_enqueue
- xhci_urb_dequeue
- xhci_drop_endpoint
- xhci_add_endpoint
- xhci_zero_in_ctx
- xhci_configure_endpoint_result
- xhci_evaluate_context_result
- xhci_count_num_new_endpoints
- xhci_count_num_dropped_endpoints
- xhci_reserve_host_resources
- xhci_free_host_resources
- xhci_finish_resource_reservation
- xhci_get_block_size
- xhci_get_largest_overhead
- xhci_check_tt_bw_table
- xhci_check_ss_bw
- xhci_check_bw_table
- xhci_is_async_ep
- xhci_is_sync_in_ep
- xhci_get_ss_bw_consumed
- xhci_drop_ep_from_interval_table
- xhci_add_ep_to_interval_table
- xhci_update_tt_active_eps
- xhci_reserve_bandwidth
- xhci_configure_endpoint
- xhci_check_bw_drop_ep_streams
- xhci_check_bandwidth
- xhci_reset_bandwidth
- xhci_setup_input_ctx_for_config_ep
- xhci_setup_input_ctx_for_quirk
- xhci_cleanup_stalled_ring
- xhci_endpoint_disable
- xhci_endpoint_reset
- xhci_check_streams_endpoint
- xhci_calculate_streams_entries
- xhci_calculate_streams_and_bitmask
- xhci_calculate_no_streams_bitmask
- xhci_alloc_streams
- xhci_free_streams
- xhci_free_device_endpoint_resources
- xhci_discover_or_reset_device
- xhci_free_dev
- xhci_disable_slot
- xhci_reserve_host_control_ep_resources
- xhci_alloc_dev
- xhci_setup_device
- xhci_address_device
- xhci_enable_device
- xhci_find_raw_port_number
- xhci_change_max_exit_latency
- xhci_calculate_hird_besl
- xhci_calculate_usb2_hw_lpm_params
- xhci_set_usb2_hardware_lpm
- xhci_check_usb2_port_capability
- xhci_update_device
- xhci_service_interval_to_ns
- xhci_get_timeout_no_hub_lpm
- xhci_calculate_intel_u1_timeout
- xhci_calculate_u1_timeout
- xhci_calculate_intel_u2_timeout
- xhci_calculate_u2_timeout
- xhci_call_host_update_timeout_for_endpoint
- xhci_update_timeout_for_endpoint
- xhci_update_timeout_for_interface
- xhci_check_intel_tier_policy
- xhci_check_tier_policy
- xhci_calculate_lpm_timeout
- calculate_max_exit_latency
- xhci_enable_usb3_lpm_timeout
- xhci_disable_usb3_lpm_timeout
- xhci_set_usb2_hardware_lpm
- xhci_update_device
- xhci_enable_usb3_lpm_timeout
- xhci_disable_usb3_lpm_timeout
- xhci_update_hub_device
- xhci_get_frame
- xhci_gen_setup
- xhci_clear_tt_buffer_complete
- xhci_init_driver
- xhci_hcd_init
- xhci_hcd_fini
   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * xHCI host controller driver
   4  *
   5  * Copyright (C) 2008 Intel Corp.
   6  *
   7  * Author: Sarah Sharp
   8  *         Some code borrowed from the Linux EHCI driver.
   9  */
  10 
  11 #include <linux/pci.h>
  12 #include <linux/iopoll.h>
  13 #include <linux/irq.h>
  14 #include <linux/log2.h>
  15 #include <linux/module.h>
  16 #include <linux/moduleparam.h>
  17 #include <linux/slab.h>
  18 #include <linux/dmi.h>
  19 #include <linux/dma-mapping.h>
  20 
  21 #include "xhci.h"
  22 #include "xhci-trace.h"
  23 #include "xhci-mtk.h"
  24 #include "xhci-debugfs.h"
  25 #include "xhci-dbgcap.h"
  26 
  27 #define DRIVER_AUTHOR "Sarah Sharp"
  28 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
  29 
  30 #define PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
  31 
  32 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
  33 static int link_quirk;
  34 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
  35 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
  36 
  37 static unsigned long long quirks;
  38 module_param(quirks, ullong, S_IRUGO);
  39 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
  40 
  41 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
  42 {
  43         struct xhci_segment *seg = ring->first_seg;
  44 
  45         if (!td || !td->start_seg)
  46                 return false;
  47         do {
  48                 if (seg == td->start_seg)
  49                         return true;
  50                 seg = seg->next;
  51         } while (seg && seg != ring->first_seg);
  52 
  53         return false;
  54 }
  55 
  56 /*
  57  * xhci_handshake - spin reading hc until handshake completes or fails
  58  * @ptr: address of hc register to be read
  59  * @mask: bits to look at in result of read
  60  * @done: value of those bits when handshake succeeds
  61  * @usec: timeout in microseconds
  62  *
  63  * Returns negative errno, or zero on success
  64  *
  65  * Success happens when the "mask" bits have the specified value (hardware
  66  * handshake done).  There are two failure modes: "usec" microseconds have
  67  * passed, or the register reads as all-ones (hardware removed).
  68  */
  69 int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
  70 {
  71         u32     result;
  72         int     ret;
  73 
  74         ret = readl_poll_timeout_atomic(ptr, result,
  75                                         (result & mask) == done ||
  76                                         result == U32_MAX,
  77                                         1, usec);
  78         if (result == U32_MAX)          /* card removed */
  79                 return -ENODEV;
  80 
  81         return ret;
  82 }
  83 
  84 /*
  85  * Disable interrupts and begin the xHCI halting process.
  86  */
  87 void xhci_quiesce(struct xhci_hcd *xhci)
  88 {
  89         u32 halted;
  90         u32 cmd;
  91         u32 mask;
  92 
  93         mask = ~(XHCI_IRQS);
  94         halted = readl(&xhci->op_regs->status) & STS_HALT;
  95         if (!halted)
  96                 mask &= ~CMD_RUN;
  97 
  98         cmd = readl(&xhci->op_regs->command);
  99         cmd &= mask;
 100         writel(cmd, &xhci->op_regs->command);
 101 }
 102 
 103 /*
 104  * Force HC into halt state.
 105  *
 106  * Disable any IRQs and clear the run/stop bit.
 107  * HC will complete any current and actively pipelined transactions, and
 108  * should halt within 16 ms of the run/stop bit being cleared.
 109  * Read HC Halted bit in the status register to see when the HC is finished.
 110  */
 111 int xhci_halt(struct xhci_hcd *xhci)
 112 {
 113         int ret;
 114         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
 115         xhci_quiesce(xhci);
 116 
 117         ret = xhci_handshake(&xhci->op_regs->status,
 118                         STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 119         if (ret) {
 120                 xhci_warn(xhci, "Host halt failed, %d\n", ret);
 121                 return ret;
 122         }
 123         xhci->xhc_state |= XHCI_STATE_HALTED;
 124         xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 125         return ret;
 126 }
 127 
 128 /*
 129  * Set the run bit and wait for the host to be running.
 130  */
 131 int xhci_start(struct xhci_hcd *xhci)
 132 {
 133         u32 temp;
 134         int ret;
 135 
 136         temp = readl(&xhci->op_regs->command);
 137         temp |= (CMD_RUN);
 138         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
 139                         temp);
 140         writel(temp, &xhci->op_regs->command);
 141 
 142         /*
 143          * Wait for the HCHalted Status bit to be 0 to indicate the HC is
 144          * running.
 145          */
 146         ret = xhci_handshake(&xhci->op_regs->status,
 147                         STS_HALT, 0, XHCI_MAX_HALT_USEC);
 148         if (ret == -ETIMEDOUT)
 149                 xhci_err(xhci, "Host took too long to start, "
 150                                 "waited %u microseconds.\n",
 151                                 XHCI_MAX_HALT_USEC);
 152         if (!ret)
 153                 /* clear state flags. Including dying, halted or removing */
 154                 xhci->xhc_state = 0;
 155 
 156         return ret;
 157 }
 158 
 159 /*
 160  * Reset a halted HC.
 161  *
 162  * This resets pipelines, timers, counters, state machines, etc.
 163  * Transactions will be terminated immediately, and operational registers
 164  * will be set to their defaults.
 165  */
 166 int xhci_reset(struct xhci_hcd *xhci)
 167 {
 168         u32 command;
 169         u32 state;
 170         int ret;
 171 
 172         state = readl(&xhci->op_regs->status);
 173 
 174         if (state == ~(u32)0) {
 175                 xhci_warn(xhci, "Host not accessible, reset failed.\n");
 176                 return -ENODEV;
 177         }
 178 
 179         if ((state & STS_HALT) == 0) {
 180                 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
 181                 return 0;
 182         }
 183 
 184         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
 185         command = readl(&xhci->op_regs->command);
 186         command |= CMD_RESET;
 187         writel(command, &xhci->op_regs->command);
 188 
 189         /*
 190          * Existing Intel xHCI controllers require a delay of 1 ms after
 191          * setting the CMD_RESET bit and before accessing any HC registers.
 192          * This allows the HC to complete the reset operation and be ready
 193          * for HC register access.  Without this delay, the subsequent HC
 194          * register access may very occasionally result in a system hang.
 195          */
 196         if (xhci->quirks & XHCI_INTEL_HOST)
 197                 udelay(1000);
 198 
 199         ret = xhci_handshake(&xhci->op_regs->command,
 200                         CMD_RESET, 0, 10 * 1000 * 1000);
 201         if (ret)
 202                 return ret;
 203 
 204         if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
 205                 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
 206 
 207         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 208                          "Wait for controller to be ready for doorbell rings");
 209         /*
 210          * xHCI cannot write to any doorbells or operational registers other
 211          * than status until the "Controller Not Ready" flag is cleared.
 212          */
 213         ret = xhci_handshake(&xhci->op_regs->status,
 214                         STS_CNR, 0, 10 * 1000 * 1000);
 215 
 216         xhci->usb2_rhub.bus_state.port_c_suspend = 0;
 217         xhci->usb2_rhub.bus_state.suspended_ports = 0;
 218         xhci->usb2_rhub.bus_state.resuming_ports = 0;
 219         xhci->usb3_rhub.bus_state.port_c_suspend = 0;
 220         xhci->usb3_rhub.bus_state.suspended_ports = 0;
 221         xhci->usb3_rhub.bus_state.resuming_ports = 0;
 222 
 223         return ret;
 224 }
 225 
 226 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 227 {
 228         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 229         int err, i;
 230         u64 val;
 231 
 232         /*
 233          * Some Renesas controllers get into a weird state if they are
 234          * reset while programmed with 64bit addresses (they will preserve
 235          * the top half of the address in internal, non visible
 236          * registers). You end up with half the address coming from the
 237          * kernel, and the other half coming from the firmware. Also,
 238          * changing the programming leads to extra accesses even if the
 239          * controller is supposed to be halted. The controller ends up with
 240          * a fatal fault, and is then ripe for being properly reset.
 241          *
 242          * Special care is taken to only apply this if the device is behind
 243          * an iommu. Doing anything when there is no iommu is definitely
 244          * unsafe...
 245          */
 246         if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
 247                 return;
 248 
 249         xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
 250 
 251         /* Clear HSEIE so that faults do not get signaled */
 252         val = readl(&xhci->op_regs->command);
 253         val &= ~CMD_HSEIE;
 254         writel(val, &xhci->op_regs->command);
 255 
 256         /* Clear HSE (aka FATAL) */
 257         val = readl(&xhci->op_regs->status);
 258         val |= STS_FATAL;
 259         writel(val, &xhci->op_regs->status);
 260 
 261         /* Now zero the registers, and brace for impact */
 262         val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 263         if (upper_32_bits(val))
 264                 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 265         val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 266         if (upper_32_bits(val))
 267                 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 268 
 269         for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
 270                 struct xhci_intr_reg __iomem *ir;
 271 
 272                 ir = &xhci->run_regs->ir_set[i];
 273                 val = xhci_read_64(xhci, &ir->erst_base);
 274                 if (upper_32_bits(val))
 275                         xhci_write_64(xhci, 0, &ir->erst_base);
 276                 val = xhci_read_64(xhci, &ir->erst_dequeue);
 277                 if (upper_32_bits(val))
 278                         xhci_write_64(xhci, 0, &ir->erst_dequeue);
 279         }
 280 
 281         /* Wait for the fault to appear. It will be cleared on reset */
 282         err = xhci_handshake(&xhci->op_regs->status,
 283                              STS_FATAL, STS_FATAL,
 284                              XHCI_MAX_HALT_USEC);
 285         if (!err)
 286                 xhci_info(xhci, "Fault detected\n");
 287 }
 288 
 289 #ifdef CONFIG_USB_PCI
 290 /*
 291  * Set up MSI
 292  */
 293 static int xhci_setup_msi(struct xhci_hcd *xhci)
 294 {
 295         int ret;
 296         /*
 297          * TODO: check with MSI SoC for sysdev
 298          */
 299         struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 300 
 301         ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
 302         if (ret < 0) {
 303                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 304                                 "failed to allocate MSI entry");
 305                 return ret;
 306         }
 307 
 308         ret = request_irq(pdev->irq, xhci_msi_irq,
 309                                 0, "xhci_hcd", xhci_to_hcd(xhci));
 310         if (ret) {
 311                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 312                                 "disable MSI interrupt");
 313                 pci_free_irq_vectors(pdev);
 314         }
 315 
 316         return ret;
 317 }
 318 
 319 /*
 320  * Set up MSI-X
 321  */
 322 static int xhci_setup_msix(struct xhci_hcd *xhci)
 323 {
 324         int i, ret = 0;
 325         struct usb_hcd *hcd = xhci_to_hcd(xhci);
 326         struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 327 
 328         /*
 329          * Calculate the number of MSI-X vectors supported.
 330          * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
 331          *   based on the number of interrupters in xHCI HCSPARAMS1.
 332          * - num_online_cpus: one MSI-X vector per CPU core, plus one
 333          *   additional vector to ensure an interrupt is always available.
 334          */
 335         xhci->msix_count = min(num_online_cpus() + 1,
 336                                 HCS_MAX_INTRS(xhci->hcs_params1));
 337 
 338         ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
 339                         PCI_IRQ_MSIX);
 340         if (ret < 0) {
 341                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 342                                 "Failed to enable MSI-X");
 343                 return ret;
 344         }
 345 
 346         for (i = 0; i < xhci->msix_count; i++) {
 347                 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
 348                                 "xhci_hcd", xhci_to_hcd(xhci));
 349                 if (ret)
 350                         goto disable_msix;
 351         }
 352 
 353         hcd->msix_enabled = 1;
 354         return ret;
 355 
 356 disable_msix:
 357         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
 358         while (--i >= 0)
 359                 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
 360         pci_free_irq_vectors(pdev);
 361         return ret;
 362 }
 363 
 364 /* Free any IRQs and disable MSI-X */
 365 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 366 {
 367         struct usb_hcd *hcd = xhci_to_hcd(xhci);
 368         struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 369 
 370         if (xhci->quirks & XHCI_PLAT)
 371                 return;
 372 
 373         /* return if using legacy interrupt */
 374         if (hcd->irq > 0)
 375                 return;
 376 
 377         if (hcd->msix_enabled) {
 378                 int i;
 379 
 380                 for (i = 0; i < xhci->msix_count; i++)
 381                         free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
 382         } else {
 383                 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
 384         }
 385 
 386         pci_free_irq_vectors(pdev);
 387         hcd->msix_enabled = 0;
 388 }
 389 
 390 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 391 {
 392         struct usb_hcd *hcd = xhci_to_hcd(xhci);
 393 
 394         if (hcd->msix_enabled) {
 395                 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 396                 int i;
 397 
 398                 for (i = 0; i < xhci->msix_count; i++)
 399                         synchronize_irq(pci_irq_vector(pdev, i));
 400         }
 401 }
 402 
 403 static int xhci_try_enable_msi(struct usb_hcd *hcd)
 404 {
 405         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 406         struct pci_dev  *pdev;
 407         int ret;
 408 
 409         /* The xhci platform device has set up IRQs through usb_add_hcd. */
 410         if (xhci->quirks & XHCI_PLAT)
 411                 return 0;
 412 
 413         pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 414         /*
 415          * Some Fresco Logic host controllers advertise MSI, but fail to
 416          * generate interrupts.  Don't even try to enable MSI.
 417          */
 418         if (xhci->quirks & XHCI_BROKEN_MSI)
 419                 goto legacy_irq;
 420 
 421         /* unregister the legacy interrupt */
 422         if (hcd->irq)
 423                 free_irq(hcd->irq, hcd);
 424         hcd->irq = 0;
 425 
 426         ret = xhci_setup_msix(xhci);
 427         if (ret)
 428                 /* fall back to MSI */
 429                 ret = xhci_setup_msi(xhci);
 430 
 431         if (!ret) {
 432                 hcd->msi_enabled = 1;
 433                 return 0;
 434         }
 435 
 436         if (!pdev->irq) {
 437                 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
 438                 return -EINVAL;
 439         }
 440 
 441  legacy_irq:
 442         if (!strlen(hcd->irq_descr))
 443                 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
 444                          hcd->driver->description, hcd->self.busnum);
 445 
 446         /* fall back to legacy interrupt */
 447         ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
 448                         hcd->irq_descr, hcd);
 449         if (ret) {
 450                 xhci_err(xhci, "request interrupt %d failed\n",
 451                                 pdev->irq);
 452                 return ret;
 453         }
 454         hcd->irq = pdev->irq;
 455         return 0;
 456 }
 457 
 458 #else
 459 
 460 static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
 461 {
 462         return 0;
 463 }
 464 
 465 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
 466 {
 467 }
 468 
 469 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 470 {
 471 }
 472 
 473 #endif
 474 
 475 static void compliance_mode_recovery(struct timer_list *t)
 476 {
 477         struct xhci_hcd *xhci;
 478         struct usb_hcd *hcd;
 479         struct xhci_hub *rhub;
 480         u32 temp;
 481         int i;
 482 
 483         xhci = from_timer(xhci, t, comp_mode_recovery_timer);
 484         rhub = &xhci->usb3_rhub;
 485 
 486         for (i = 0; i < rhub->num_ports; i++) {
 487                 temp = readl(rhub->ports[i]->addr);
 488                 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
 489                         /*
 490                          * Compliance Mode Detected. Letting USB Core
 491                          * handle the Warm Reset
 492                          */
 493                         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 494                                         "Compliance mode detected->port %d",
 495                                         i + 1);
 496                         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 497                                         "Attempting compliance mode recovery");
 498                         hcd = xhci->shared_hcd;
 499 
 500                         if (hcd->state == HC_STATE_SUSPENDED)
 501                                 usb_hcd_resume_root_hub(hcd);
 502 
 503                         usb_hcd_poll_rh_status(hcd);
 504                 }
 505         }
 506 
 507         if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
 508                 mod_timer(&xhci->comp_mode_recovery_timer,
 509                         jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
 510 }
 511 
 512 /*
 513  * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 514  * re-driver that causes ports behind it to enter compliance mode.
 515  *
 516  * The quirk creates a timer that polls the link state of each host
 517  * controller port every 2 seconds and recovers any port found in
 518  * compliance mode; otherwise such a port becomes "dead". Because no
 519  * status event is generated when entering compliance mode (per the
 520  * xHCI spec), this quirk is needed on systems with the failing hardware.
 521  */
 522 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 523 {
 524         xhci->port_status_u0 = 0;
 525         timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
 526                     0);
 527         xhci->comp_mode_recovery_timer.expires = jiffies +
 528                         msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 529 
 530         add_timer(&xhci->comp_mode_recovery_timer);
 531         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 532                         "Compliance mode recovery timer initialized");
 533 }
 534 
 535 /*
 536  * Quirk needed on certain Hewlett-Packard workstations that use the
 537  * SN65LVPE502CP USB3.0 re-driver:
 538  *
 539  * Vendor: Hewlett-Packard -> System Models: Z4xx, Z6xx, Z8xx, Z1
 540  */
 541 static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
 542 {
 543         const char *dmi_product_name, *dmi_sys_vendor;
 544 
 545         dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
 546         dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
 547         if (!dmi_product_name || !dmi_sys_vendor)
 548                 return false;
 549 
 550         if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
 551                 return false;
 552 
 553         if (strstr(dmi_product_name, "Z420") ||
 554                         strstr(dmi_product_name, "Z620") ||
 555                         strstr(dmi_product_name, "Z820") ||
 556                         strstr(dmi_product_name, "Z1 Workstation"))
 557                 return true;
 558 
 559         return false;
 560 }
 561 
 562 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
 563 {
 564         return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
 565 }
 566 
 567 
 568 /*
 569  * Initialize memory for HCD and xHC (one-time init).
 570  *
 571  * Program the PAGESIZE register, initialize the device context array,
 572  * create device contexts, set up a command ring segment, and create the
 573  * event ring (one for now).
 574  */
 575 static int xhci_init(struct usb_hcd *hcd)
 576 {
 577         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 578         int retval = 0;
 579 
 580         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
 581         spin_lock_init(&xhci->lock);
 582         if (xhci->hci_version == 0x95 && link_quirk) {
 583                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 584                                 "QUIRK: Not clearing Link TRB chain bits.");
 585                 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
 586         } else {
 587                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 588                                 "xHCI doesn't need link TRB QUIRK");
 589         }
 590         retval = xhci_mem_init(xhci, GFP_KERNEL);
 591         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
 592 
 593         /* Initialize Compliance Mode Recovery Data if needed */
 594         if (xhci_compliance_mode_recovery_timer_quirk_check()) {
 595                 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
 596                 compliance_mode_recovery_timer_init(xhci);
 597         }
 598 
 599         return retval;
 600 }
 601 
 602 /*-------------------------------------------------------------------------*/
 603 
 604 
 605 static int xhci_run_finished(struct xhci_hcd *xhci)
 606 {
 607         if (xhci_start(xhci)) {
 608                 xhci_halt(xhci);
 609                 return -ENODEV;
 610         }
 611         xhci->shared_hcd->state = HC_STATE_RUNNING;
 612         xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
 613 
 614         if (xhci->quirks & XHCI_NEC_HOST)
 615                 xhci_ring_cmd_db(xhci);
 616 
 617         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 618                         "Finished xhci_run for USB3 roothub");
 619         return 0;
 620 }
 621 
 622 /*
 623  * Start the HC after it was halted.
 624  *
 625  * This function is called by the USB core when the HC driver is added.
 626  * Its opposite is xhci_stop().
 627  *
 628  * xhci_init() must be called once before this function can be called.
 629  * Reset the HC, enable device slot contexts, program DCBAAP, and
 630  * initialize the command ring.  Set up MSI-X vectors and enable
 631  * interrupts if supported, then start the HC and enable the event ring
 632  * interrupter.
 633  */
 634 int xhci_run(struct usb_hcd *hcd)
 635 {
 636         u32 temp;
 637         u64 temp_64;
 638         int ret;
 639         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 640 
 641         /*
 642          * Start the xHCI host controller running only after the USB 2.0
 643          * roothub is set up.
 644          */
 645         hcd->uses_new_polling = 1;
 646         if (!usb_hcd_is_primary_hcd(hcd))
 647                 return xhci_run_finished(xhci);
 648 
 649         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
 650 
 651         ret = xhci_try_enable_msi(hcd);
 652         if (ret)
 653                 return ret;
 654 
 655         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 656         temp_64 &= ~ERST_PTR_MASK;
 657         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 658                         "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
 659 
 660         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 661                         "// Set the interrupt modulation register");
 662         temp = readl(&xhci->ir_set->irq_control);
 663         temp &= ~ER_IRQ_INTERVAL_MASK;
 664         temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
 665         writel(temp, &xhci->ir_set->irq_control);
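     	/*
     	 * Note (editorial): the IMOD interval field counts in 250 ns units;
     	 * assuming the driver default imod_interval of 40000 ns, this
     	 * programs 40000 / 250 = 160.
     	 */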
 666 
 667         /* Set the HCD state before we enable the irqs */
 668         temp = readl(&xhci->op_regs->command);
 669         temp |= (CMD_EIE);
 670         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 671                         "// Enable interrupts, cmd = 0x%x.", temp);
 672         writel(temp, &xhci->op_regs->command);
 673 
 674         temp = readl(&xhci->ir_set->irq_pending);
 675         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 676                         "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
 677                         xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 678         writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
 679 
 680         if (xhci->quirks & XHCI_NEC_HOST) {
 681                 struct xhci_command *command;
 682 
 683                 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
 684                 if (!command)
 685                         return -ENOMEM;
 686 
 687                 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
 688                                 TRB_TYPE(TRB_NEC_GET_FW));
 689                 if (ret)
 690                         xhci_free_command(xhci, command);
 691         }
 692         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 693                         "Finished xhci_run for USB2 roothub");
 694 
 695         xhci_dbc_init(xhci);
 696 
 697         xhci_debugfs_init(xhci);
 698 
 699         return 0;
 700 }
 701 EXPORT_SYMBOL_GPL(xhci_run);
 702 
 703 /*
 704  * Stop the xHCI host controller.
 705  *
 706  * This function is called by the USB core when the HC driver is removed.
 707  * Its opposite is xhci_run().
 708  *
 709  * Halt and reset the controller, free IRQs, and clean up memory
 710  * structures tied to the lifetime of the HCD.
 711  */
 712 static void xhci_stop(struct usb_hcd *hcd)
 713 {
 714         u32 temp;
 715         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 716 
 717         mutex_lock(&xhci->mutex);
 718 
 719         /* Only halt host and free memory after both HCDs are removed */
 720         if (!usb_hcd_is_primary_hcd(hcd)) {
 721                 mutex_unlock(&xhci->mutex);
 722                 return;
 723         }
 724 
 725         xhci_dbc_exit(xhci);
 726 
 727         spin_lock_irq(&xhci->lock);
 728         xhci->xhc_state |= XHCI_STATE_HALTED;
 729         xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 730         xhci_halt(xhci);
 731         xhci_reset(xhci);
 732         spin_unlock_irq(&xhci->lock);
 733 
 734         xhci_cleanup_msix(xhci);
 735 
 736         /* Deleting Compliance Mode Recovery Timer */
 737         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 738                         (!(xhci_all_ports_seen_u0(xhci)))) {
 739                 del_timer_sync(&xhci->comp_mode_recovery_timer);
 740                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 741                                 "%s: compliance mode recovery timer deleted",
 742                                 __func__);
 743         }
 744 
 745         if (xhci->quirks & XHCI_AMD_PLL_FIX)
 746                 usb_amd_dev_put();
 747 
 748         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 749                         "// Disabling event ring interrupts");
 750         temp = readl(&xhci->op_regs->status);
 751         writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
 752         temp = readl(&xhci->ir_set->irq_pending);
 753         writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
 754 
 755         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
 756         xhci_mem_cleanup(xhci);
 757         xhci_debugfs_exit(xhci);
 758         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 759                         "xhci_stop completed - status = %x",
 760                         readl(&xhci->op_regs->status));
 761         mutex_unlock(&xhci->mutex);
 762 }
 763 
 764 /*
 765  * Shutdown HC (not bus-specific)
 766  *
 767  * This is called when the machine is rebooting or halting.  We assume that
 768  * the machine will be powered off, and the HC's internal state will be
 769  * reset.  Don't bother to free memory.
 770  *
 771  * This will only ever be called with the main usb_hcd (the USB3 roothub).
 772  */
 773 void xhci_shutdown(struct usb_hcd *hcd)
 774 {
 775         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 776 
 777         if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
 778                 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
 779 
 780         spin_lock_irq(&xhci->lock);
 781         xhci_halt(xhci);
 782         /* Workaround for spurious wakeups at shutdown with HSW */
 783         if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
 784                 xhci_reset(xhci);
 785         spin_unlock_irq(&xhci->lock);
 786 
 787         xhci_cleanup_msix(xhci);
 788 
 789         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 790                         "xhci_shutdown completed - status = %x",
 791                         readl(&xhci->op_regs->status));
 792 }
 793 EXPORT_SYMBOL_GPL(xhci_shutdown);
 794 
 795 #ifdef CONFIG_PM
 796 static void xhci_save_registers(struct xhci_hcd *xhci)
 797 {
 798         xhci->s3.command = readl(&xhci->op_regs->command);
 799         xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
 800         xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 801         xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
 802         xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
 803         xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 804         xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 805         xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
 806         xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
 807 }
 808 
 809 static void xhci_restore_registers(struct xhci_hcd *xhci)
 810 {
 811         writel(xhci->s3.command, &xhci->op_regs->command);
 812         writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
 813         xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 814         writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
 815         writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
 816         xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
 817         xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
 818         writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
 819         writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
 820 }
 821 
 822 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
 823 {
 824         u64     val_64;
 825 
 826         /* step 2: initialize command ring buffer */
 827         val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 828         val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 829                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 830                                       xhci->cmd_ring->dequeue) &
 831                  (u64) ~CMD_RING_RSVD_BITS) |
 832                 xhci->cmd_ring->cycle_state;
 833         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 834                         "// Setting command ring address to 0x%llx",
 835                         (long unsigned long) val_64);
 836         xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 837 }
 838 
 839 /*
 840  * The whole command ring must be cleared to zero when we suspend the host.
 841  *
 842  * The host doesn't save the command ring pointer in the suspend well, so we
 843  * need to re-program it on resume.  Unfortunately, the pointer must be
 844  * 64-byte aligned, so we can't just restore the old value on resume; we
 845  * zero the ring, reset the software enqueue/dequeue state, and point the
 846  * hardware at the start of the first segment.
 847  */
 848 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 849 {
 850         struct xhci_ring *ring;
 851         struct xhci_segment *seg;
 852 
 853         ring = xhci->cmd_ring;
 854         seg = ring->deq_seg;
 855         do {
 856                 memset(seg->trbs, 0,
 857                         sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
 858                 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
 859                         cpu_to_le32(~TRB_CYCLE);
 860                 seg = seg->next;
 861         } while (seg != ring->deq_seg);
 862 
 863         /* Reset the software enqueue and dequeue pointers */
 864         ring->deq_seg = ring->first_seg;
 865         ring->dequeue = ring->first_seg->trbs;
 866         ring->enq_seg = ring->deq_seg;
 867         ring->enqueue = ring->dequeue;
 868 
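     	/*
     	 * One TRB per segment is a link TRB, and one further slot is kept
     	 * empty so that a full ring can be distinguished from an empty
     	 * one; e.g. with 256-TRB segments, two segments leave
     	 * 2 * 255 - 1 = 509 free TRBs.
     	 */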
 869         ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 870         /*
 871          * Ring is now zeroed, so the HW should look for change of ownership
 872          * when the cycle bit is set to 1.
 873          */
 874         ring->cycle_state = 1;
 875 
 876         /*
 877          * Reset the hardware dequeue pointer.
 878          * Yes, this will need to be re-written after resume, but we're
 879          * paranoid and want to make sure the hardware doesn't access bogus
 880          * memory because, say, the BIOS or an SMI started the host without
 881          * changing the command ring pointers.
 882          */
 883         xhci_set_cmd_ring_deq(xhci);
 884 }
 885 
 886 static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
 887 {
 888         struct xhci_port **ports;
 889         int port_index;
 890         unsigned long flags;
 891         u32 t1, t2, portsc;
 892 
 893         spin_lock_irqsave(&xhci->lock, flags);
 894 
 895         /* disable USB3 ports Wake bits */
 896         port_index = xhci->usb3_rhub.num_ports;
 897         ports = xhci->usb3_rhub.ports;
 898         while (port_index--) {
 899                 t1 = readl(ports[port_index]->addr);
 900                 portsc = t1;
 901                 t1 = xhci_port_state_to_neutral(t1);
 902                 t2 = t1 & ~PORT_WAKE_BITS;
 903                 if (t1 != t2) {
 904                         writel(t2, ports[port_index]->addr);
 905                         xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
 906                                  xhci->usb3_rhub.hcd->self.busnum,
 907                                  port_index + 1, portsc, t2);
 908                 }
 909         }
 910 
 911         /* disable USB2 ports Wake bits */
 912         port_index = xhci->usb2_rhub.num_ports;
 913         ports = xhci->usb2_rhub.ports;
 914         while (port_index--) {
 915                 t1 = readl(ports[port_index]->addr);
 916                 portsc = t1;
 917                 t1 = xhci_port_state_to_neutral(t1);
 918                 t2 = t1 & ~PORT_WAKE_BITS;
 919                 if (t1 != t2) {
 920                         writel(t2, ports[port_index]->addr);
 921                         xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
 922                                  xhci->usb2_rhub.hcd->self.busnum,
 923                                  port_index + 1, portsc, t2);
 924                 }
 925         }
 926         spin_unlock_irqrestore(&xhci->lock, flags);
 927 }
 928 
 929 static bool xhci_pending_portevent(struct xhci_hcd *xhci)
 930 {
 931         struct xhci_port        **ports;
 932         int                     port_index;
 933         u32                     status;
 934         u32                     portsc;
 935 
 936         status = readl(&xhci->op_regs->status);
 937         if (status & STS_EINT)
 938                 return true;
 939         /*
 940          * Checking STS_EINT is not enough as there is a lag between a change
 941          * bit being set and the Port Status Change Event that it generated
 942          * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
 943          */
 944 
 945         port_index = xhci->usb2_rhub.num_ports;
 946         ports = xhci->usb2_rhub.ports;
 947         while (port_index--) {
 948                 portsc = readl(ports[port_index]->addr);
 949                 if (portsc & PORT_CHANGE_MASK ||
 950                     (portsc & PORT_PLS_MASK) == XDEV_RESUME)
 951                         return true;
 952         }
 953         port_index = xhci->usb3_rhub.num_ports;
 954         ports = xhci->usb3_rhub.ports;
 955         while (port_index--) {
 956                 portsc = readl(ports[port_index]->addr);
 957                 if (portsc & PORT_CHANGE_MASK ||
 958                     (portsc & PORT_PLS_MASK) == XDEV_RESUME)
 959                         return true;
 960         }
 961         return false;
 962 }
 963 
 964 /*
 965  * Stop HC (not bus-specific).
 966  *
 967  * This is called when the machine transitions into the S3/S4 system
 968  * power states.
 969  */
 970 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 971 {
 972         int                     rc = 0;
 973         unsigned int            delay = XHCI_MAX_HALT_USEC * 2;
 974         struct usb_hcd          *hcd = xhci_to_hcd(xhci);
 975         u32                     command;
 976         u32                     res;
 977 
 978         if (!hcd->state)
 979                 return 0;
 980 
 981         if (hcd->state != HC_STATE_SUSPENDED ||
 982                         xhci->shared_hcd->state != HC_STATE_SUSPENDED)
 983                 return -EINVAL;
 984 
 985         xhci_dbc_suspend(xhci);
 986 
 987         /* Clear root port wake on bits if wakeup not allowed. */
 988         if (!do_wakeup)
 989                 xhci_disable_port_wake_on_bits(xhci);
 990 
 991         /* Don't poll the roothubs on bus suspend. */
 992         xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
 993         clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 994         del_timer_sync(&hcd->rh_timer);
 995         clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 996         del_timer_sync(&xhci->shared_hcd->rh_timer);
 997 
 998         if (xhci->quirks & XHCI_SUSPEND_DELAY)
 999                 usleep_range(1000, 1500);
1000 
1001         spin_lock_irq(&xhci->lock);
1002         clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1003         clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1004         /* step 1: stop endpoint */
1005         /* (skipped; port suspend has already stopped the endpoints) */
1006 
1007         /* step 2: clear Run/Stop bit */
1008         command = readl(&xhci->op_regs->command);
1009         command &= ~CMD_RUN;
1010         writel(command, &xhci->op_regs->command);
1011 
1012         /* Some chips from Fresco Logic need an extraordinary delay */
1013         delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
1014 
1015         if (xhci_handshake(&xhci->op_regs->status,
1016                       STS_HALT, STS_HALT, delay)) {
1017                 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1018                 spin_unlock_irq(&xhci->lock);
1019                 return -ETIMEDOUT;
1020         }
1021         xhci_clear_command_ring(xhci);
1022 
1023         /* step 3: save registers */
1024         xhci_save_registers(xhci);
1025 
1026         /* step 4: set CSS flag */
1027         command = readl(&xhci->op_regs->command);
1028         command |= CMD_CSS;
1029         writel(command, &xhci->op_regs->command);
1030         xhci->broken_suspend = 0;
1031         if (xhci_handshake(&xhci->op_regs->status,
1032                                 STS_SAVE, 0, 20 * 1000)) {
1033         /*
1034          * AMD SNPS xHC 3.0 occasionally fails to clear the SSS bit of
1035          * USBSTS, so polling for it to clear never succeeds and the
1036          * driver would time out even though the controller is fine.
1037          * To work around this, check that the SRE and HCE bits are not
1038          * set (per the xHCI spec the controller is in an unrecoverable
1039          * state when they are) and, if so, treat the save as successful
1040          * and flag the host for a reset on resume.
1041          */
1042                 res = readl(&xhci->op_regs->status);
1043                 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
1044                     (((res & STS_SRE) == 0) &&
1045                                 ((res & STS_HCE) == 0))) {
1046                         xhci->broken_suspend = 1;
1047                 } else {
1048                         xhci_warn(xhci, "WARN: xHC save state timeout\n");
1049                         spin_unlock_irq(&xhci->lock);
1050                         return -ETIMEDOUT;
1051                 }
1052         }
1053         spin_unlock_irq(&xhci->lock);
1054 
1055         /*
1056          * Deleting Compliance Mode Recovery Timer because the xHCI Host
1057          * is about to be suspended.
1058          */
1059         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1060                         (!(xhci_all_ports_seen_u0(xhci)))) {
1061                 del_timer_sync(&xhci->comp_mode_recovery_timer);
1062                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1063                                 "%s: compliance mode recovery timer deleted",
1064                                 __func__);
1065         }
1066 
1067         /* step 5: remove core well power */
1068         /* synchronize irq when using MSI-X */
1069         xhci_msix_sync_irqs(xhci);
1070 
1071         return rc;
1072 }
1073 EXPORT_SYMBOL_GPL(xhci_suspend);
1074 
1075 /*
1076  * Start the xHC (not bus-specific).
1077  *
1078  * This is called when the machine transitions out of the S3/S4 system
1079  * power states.
1080  */
1081 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1082 {
1083         u32                     command, temp = 0;
1084         struct usb_hcd          *hcd = xhci_to_hcd(xhci);
1085         struct usb_hcd          *secondary_hcd;
1086         int                     retval = 0;
1087         bool                    comp_timer_running = false;
1088 
1089         if (!hcd->state)
1090                 return 0;
1091 
1092         /*
1093          * Wait a bit if either of the roothubs needs to settle from the
1094          * transition into bus suspend.
1095          */
1096         if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1097             time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1098                 msleep(100);
1099 
1100         set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1101         set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1102 
1103         spin_lock_irq(&xhci->lock);
1104         if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
1105                 hibernated = true;
1106 
1107         if (!hibernated) {
1108                 /*
1109                  * Some controllers might lose power during suspend, so wait
1110                  * for the controller not ready bit to clear, just as in init.
1111                  */
1112                 retval = xhci_handshake(&xhci->op_regs->status,
1113                                         STS_CNR, 0, 10 * 1000 * 1000);
1114                 if (retval) {
1115                         xhci_warn(xhci, "Controller not ready at resume %d\n",
1116                                   retval);
1117                         spin_unlock_irq(&xhci->lock);
1118                         return retval;
1119                 }
1120                 /* step 1: restore registers */
1121                 xhci_restore_registers(xhci);
1122                 /* step 2: initialize command ring buffer */
1123                 xhci_set_cmd_ring_deq(xhci);
1124                 /* step 3: restore state and start state */
1125                 /* set the Controller Restore State (CRS) flag */
1126                 command = readl(&xhci->op_regs->command);
1127                 command |= CMD_CRS;
1128                 writel(command, &xhci->op_regs->command);
1129                 /*
1130                  * Some controllers take up to 55+ ms to complete the
1131                  * controller restore, so set the timeout to 100 ms; the xHCI
1132                  * specification doesn't mention any timeout value.
1133                  */
1134                 if (xhci_handshake(&xhci->op_regs->status,
1135                               STS_RESTORE, 0, 100 * 1000)) {
1136                         xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1137                         spin_unlock_irq(&xhci->lock);
1138                         return -ETIMEDOUT;
1139                 }
1140                 temp = readl(&xhci->op_regs->status);
1141         }
1142 
1143         /* If restore operation fails, re-initialize the HC during resume */
1144         if ((temp & STS_SRE) || hibernated) {
1145 
1146                 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1147                                 !(xhci_all_ports_seen_u0(xhci))) {
1148                         del_timer_sync(&xhci->comp_mode_recovery_timer);
1149                         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1150                                 "Compliance Mode Recovery Timer deleted!");
1151                 }
1152 
1153                 /* Let the USB core know _both_ roothubs lost power. */
1154                 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1155                 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1156 
1157                 xhci_dbg(xhci, "Stop HCD\n");
1158                 xhci_halt(xhci);
1159                 xhci_zero_64b_regs(xhci);
1160                 retval = xhci_reset(xhci);
1161                 spin_unlock_irq(&xhci->lock);
1162                 if (retval)
1163                         return retval;
1164                 xhci_cleanup_msix(xhci);
1165 
1166                 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1167                 temp = readl(&xhci->op_regs->status);
1168                 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1169                 temp = readl(&xhci->ir_set->irq_pending);
1170                 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1171 
1172                 xhci_dbg(xhci, "cleaning up memory\n");
1173                 xhci_mem_cleanup(xhci);
1174                 xhci_debugfs_exit(xhci);
1175                 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1176                             readl(&xhci->op_regs->status));
1177 
1178                 /*
1179                  * USB core calls the PCI reinit and start functions twice:
1180                  * first with the primary HCD, and then with the secondary HCD.
1181                  * If we don't do the same, the host will never be started.
1182                  */
1182                 if (!usb_hcd_is_primary_hcd(hcd))
1183                         secondary_hcd = hcd;
1184                 else
1185                         secondary_hcd = xhci->shared_hcd;
1186 
1187                 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1188                 retval = xhci_init(hcd->primary_hcd);
1189                 if (retval)
1190                         return retval;
1191                 comp_timer_running = true;
1192 
1193                 xhci_dbg(xhci, "Start the primary HCD\n");
1194                 retval = xhci_run(hcd->primary_hcd);
1195                 if (!retval) {
1196                         xhci_dbg(xhci, "Start the secondary HCD\n");
1197                         retval = xhci_run(secondary_hcd);
1198                 }
1199                 hcd->state = HC_STATE_SUSPENDED;
1200                 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1201                 goto done;
1202         }
1203 
1204         /* step 4: set Run/Stop bit */
1205         command = readl(&xhci->op_regs->command);
1206         command |= CMD_RUN;
1207         writel(command, &xhci->op_regs->command);
1208         xhci_handshake(&xhci->op_regs->status, STS_HALT,
1209                   0, 250 * 1000);
1210 
1211         /* step 5: walk topology and initialize portsc,
1212          * portpmsc and portli
1213          */
1214         /* this is done in bus_resume */
1215 
1216         /* step 6: restart each of the previously running endpoints */
1217         /* this is also done in bus_resume; due to the crash, no doorbells
1218          * should be rung and no changes made to the endpoint contexts
1219          */
1220         spin_unlock_irq(&xhci->lock);
1221 
1222         xhci_dbc_resume(xhci);
1223 
1224  done:
1225         if (retval == 0) {
1226                 /* Resume root hubs only if there are pending events. */
1227                 if (xhci_pending_portevent(xhci)) {
1228                         usb_hcd_resume_root_hub(xhci->shared_hcd);
1229                         usb_hcd_resume_root_hub(hcd);
1230                 }
1231         }
1232 
1233         /*
1234          * If the system is subject to the quirk, the Compliance Mode Timer
1235          * always needs to be re-initialized after a system resume, since the
1236          * ports can suffer the compliance mode issue again. It doesn't
1237          * matter whether the ports previously entered U0 before suspension.
1238          */
1239         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1240                 compliance_mode_recovery_timer_init(xhci);
1241 
1242         if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1243                 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1244 
1245         /* Re-enable port polling. */
1246         xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1247         set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1248         usb_hcd_poll_rh_status(xhci->shared_hcd);
1249         set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1250         usb_hcd_poll_rh_status(hcd);
1251 
1252         return retval;
1253 }
1254 EXPORT_SYMBOL_GPL(xhci_resume);
1255 #endif  /* CONFIG_PM */
1256 
1257 /*-------------------------------------------------------------------------*/
1258 
1259 /*
1260  * Bypass the DMA mapping if the URB is suitable for Immediate Data
1261  * Transfer (IDT): the data is then copied directly into the TRB's data
1262  * buffer pointer field. This is limited to transfers of up to 8 bytes on
1263  * OUT endpoints whose wMaxPacketSize is at least 8 bytes.
1264  */
1265 static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1266                                 gfp_t mem_flags)
1267 {
1268         if (xhci_urb_suitable_for_idt(urb))
1269                 return 0;
1270 
1271         return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1272 }
1273 
1274 /*
1275  * xhci_get_endpoint_index - Find the index for an endpoint given its
1276  * descriptor.  Use the return value to right shift 1 for the bitmask.
1277  *
1278  * Index = (epnum * 2) + direction - 1, where direction = 0 for OUT and
1279  * 1 for IN.
1280  *
1281  * For control endpoints, the IN index is used (OUT index is unused), so
1282  * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2).
1283  */
1284 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1285 {
1286         unsigned int index;
1287         if (usb_endpoint_xfer_control(desc))
1288                 index = (unsigned int) (usb_endpoint_num(desc)*2);
1289         else
1290                 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1291                         (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1292         return index;
1293 }
1294 
1295 /* The reverse operation to xhci_get_endpoint_index. Calculate the USB
1296  * endpoint address from the xHCI endpoint index.
1297  */
1298 unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1299 {
1300         unsigned int number = DIV_ROUND_UP(ep_index, 2);
1301         unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1302         return direction | number;
1303 }
1304 
1305 /* Find the flag for this endpoint (for use in the control context).  Use
1306  * the endpoint index to create a bitmask.  The slot context is bit 0,
1307  * endpoint 0 is bit 1, etc.
1308  */
1309 static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1310 {
1311         return 1 << (xhci_get_endpoint_index(desc) + 1);
1312 }
1313 
1314 /* Find the flag for this endpoint (for use in the control context).  Use
1315  * the endpoint index to create a bitmask.  The slot context is bit 0,
1316  * endpoint 0 is bit 1, etc.
1317  */
1318 static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1319 {
1320         return 1 << (ep_index + 1);
1321 }
1322 
1323 /* Compute the last valid endpoint context index.  Basically, this is the
1324  * endpoint index plus one.  For slot contexts with more than one valid
1325  * endpoint, we find the most significant bit set in the added contexts
1326  * flags; e.g. ep 1 IN (with epnum 0x81) gives added_ctxs = 0b1000, and
1327  * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1328  */
1329 unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1330 {
1331         return fls(added_ctxs) - 1;
1332 }
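     /*
      * Worked example for the helpers above (illustrative only): endpoint
      * 1 IN has bEndpointAddress 0x81, so:
      *
      *	xhci_get_endpoint_index()        = 1 * 2 + 1 - 1 = 2
      *	xhci_get_endpoint_address(2)     = USB_DIR_IN | 1 = 0x81
      *	xhci_get_endpoint_flag()         = 1 << (2 + 1)   = 0b1000
      *	xhci_last_valid_endpoint(0b1000) = fls(8) - 1     = 3
      */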
1333 
1334 /* Returns 1 if the arguments are OK;
1335  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
1336  */
1337 static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1338                 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1339                 const char *func) {
1340         struct xhci_hcd *xhci;
1341         struct xhci_virt_device *virt_dev;
1342 
1343         if (!hcd || (check_ep && !ep) || !udev) {
1344                 pr_debug("xHCI %s called with invalid args\n", func);
1345                 return -EINVAL;
1346         }
1347         if (!udev->parent) {
1348                 pr_debug("xHCI %s called for root hub\n", func);
1349                 return 0;
1350         }
1351 
1352         xhci = hcd_to_xhci(hcd);
1353         if (check_virt_dev) {
1354                 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1355                         xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1356                                         func);
1357                         return -EINVAL;
1358                 }
1359 
1360                 virt_dev = xhci->devs[udev->slot_id];
1361                 if (virt_dev->udev != udev) {
1362                         xhci_dbg(xhci, "xHCI %s called with udev and "
1363                                           "virt_dev that do not match\n", func);
1364                         return -EINVAL;
1365                 }
1366         }
1367 
1368         if (xhci->xhc_state & XHCI_STATE_HALTED)
1369                 return -ENODEV;
1370 
1371         return 1;
1372 }
1373 
1374 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1375                 struct usb_device *udev, struct xhci_command *command,
1376                 bool ctx_change, bool must_succeed);
1377 
1378 /*
1379  * Full speed devices may have a max packet size greater than 8 bytes, but the
1380  * USB core doesn't know that until it reads the first 8 bytes of the
1381  * descriptor.  If the usb_device's max packet size changes after that point,
1382  * we need to issue an evaluate context command and wait on it.
1383  */
1384 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1385                 unsigned int ep_index, struct urb *urb)
1386 {
1387         struct xhci_container_ctx *out_ctx;
1388         struct xhci_input_control_ctx *ctrl_ctx;
1389         struct xhci_ep_ctx *ep_ctx;
1390         struct xhci_command *command;
1391         int max_packet_size;
1392         int hw_max_packet_size;
1393         int ret = 0;
1394 
1395         out_ctx = xhci->devs[slot_id]->out_ctx;
1396         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1397         hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1398         max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1399         if (hw_max_packet_size != max_packet_size) {
1400                 xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
1401                                 "Max Packet Size for ep 0 changed.");
1402                 xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
1403                                 "Max packet size in usb_device = %d",
1404                                 max_packet_size);
1405                 xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
1406                                 "Max packet size in xHCI HW = %d",
1407                                 hw_max_packet_size);
1408                 xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
1409                                 "Issuing evaluate context command.");
1410 
1411                 /* Set up the input context flags for the command */
1412                 /* FIXME: This won't work if a non-default control endpoint
1413                  * changes max packet sizes.
1414                  */
1415 
1416                 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
1417                 if (!command)
1418                         return -ENOMEM;
1419 
1420                 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1421                 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1422                 if (!ctrl_ctx) {
1423                         xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1424                                         __func__);
1425                         ret = -ENOMEM;
1426                         goto command_cleanup;
1427                 }
1428                 /* Set up the modified control endpoint 0 */
1429                 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1430                                 xhci->devs[slot_id]->out_ctx, ep_index);
1431 
1432                 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1433                 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1434                 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1435 
1436                 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1437                 ctrl_ctx->drop_flags = 0;
1438 
1439                 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1440                                 true, false);
1441 
1442                 /* Clean up the input context for later use by bandwidth
1443                  * functions.
1444                  */
1445                 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1446 command_cleanup:
1447                 kfree(command->completion);
1448                 kfree(command);
1449         }
1450         return ret;
1451 }
1452 
1453 /*
1454  * non-error returns are a promise to giveback() the urb later
1455  * we drop ownership so next owner (or urb unlink) can get it
1456  */
1457 static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1458 {
1459         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1460         unsigned long flags;
1461         int ret = 0;
1462         unsigned int slot_id, ep_index;
1463         unsigned int *ep_state;
1464         struct urb_priv *urb_priv;
1465         int num_tds;
1466 
1467         if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1468                                         true, true, __func__) <= 0)
1469                 return -EINVAL;
1470 
1471         slot_id = urb->dev->slot_id;
1472         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1473         ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1474 
1475         if (!HCD_HW_ACCESSIBLE(hcd)) {
1476                 if (!in_interrupt())
1477                         xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1478                 return -ESHUTDOWN;
1479         }
1480         if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1481                 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1482                 return -ENODEV;
1483         }
1484 
1485         if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1486                 num_tds = urb->number_of_packets;
1487         else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1488             urb->transfer_buffer_length > 0 &&
1489             urb->transfer_flags & URB_ZERO_PACKET &&
1490             !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1491                 num_tds = 2;
1492         else
1493                 num_tds = 1;
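     	/*
     	 * In short: isoc URBs need one TD per packet, and a bulk OUT URB
     	 * with URB_ZERO_PACKET set whose length is an exact non-zero
     	 * multiple of wMaxPacketSize needs a second TD for the trailing
     	 * zero-length packet; everything else is a single TD.
     	 */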
1494 
1495         urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1496         if (!urb_priv)
1497                 return -ENOMEM;
1498 
1499         urb_priv->num_tds = num_tds;
1500         urb_priv->num_tds_done = 0;
1501         urb->hcpriv = urb_priv;
1502 
1503         trace_xhci_urb_enqueue(urb);
1504 
1505         if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1506                 /* Check to see if the max packet size for the default control
1507                  * endpoint changed during FS device enumeration
1508                  */
1509                 if (urb->dev->speed == USB_SPEED_FULL) {
1510                         ret = xhci_check_maxpacket(xhci, slot_id,
1511                                         ep_index, urb);
1512                         if (ret < 0) {
1513                                 xhci_urb_free_priv(urb_priv);
1514                                 urb->hcpriv = NULL;
1515                                 return ret;
1516                         }
1517                 }
1518         }
1519 
1520         spin_lock_irqsave(&xhci->lock, flags);
1521 
1522         if (xhci->xhc_state & XHCI_STATE_DYING) {
1523                 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1524                          urb->ep->desc.bEndpointAddress, urb);
1525                 ret = -ESHUTDOWN;
1526                 goto free_priv;
1527         }
1528         if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1529                 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1530                           *ep_state);
1531                 ret = -EINVAL;
1532                 goto free_priv;
1533         }
1534         if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1535                 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1536                 ret = -EINVAL;
1537                 goto free_priv;
1538         }
1539 
1540         switch (usb_endpoint_type(&urb->ep->desc)) {
1541 
1542         case USB_ENDPOINT_XFER_CONTROL:
1543                 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1544                                          slot_id, ep_index);
1545                 break;
1546         case USB_ENDPOINT_XFER_BULK:
1547                 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1548                                          slot_id, ep_index);
1549                 break;
1550         case USB_ENDPOINT_XFER_INT:
1551                 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1552                                 slot_id, ep_index);
1553                 break;
1554         case USB_ENDPOINT_XFER_ISOC:
1555                 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1556                                 slot_id, ep_index);
1557         }
1558 
1559         if (ret) {
1560 free_priv:
1561                 xhci_urb_free_priv(urb_priv);
1562                 urb->hcpriv = NULL;
1563         }
1564         spin_unlock_irqrestore(&xhci->lock, flags);
1565         return ret;
1566 }
1567 
1568 /*
1569  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1570  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1571  * should pick up where it left off in the TD, unless a Set Transfer Ring
1572  * Dequeue Pointer is issued.
1573  *
1574  * The TRBs that make up the buffers for the canceled URB will be "removed"
1575  * from the ring.  Since the ring is a contiguous structure, they can't be
1576  * physically removed.  Instead, there are two options:
1577  *
1578  *  1) If the HC is in the middle of processing the URB to be canceled, we
1579  *     simply move the ring's dequeue pointer past those TRBs using the Set
1580  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1581  *     when drivers time out on the last submitted URB and attempt to
1582  *     cancel it.
1583  *
1584  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1585  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)
1586  *     The HC will need to invalidate any TRBs it has cached after the stop
1587  *     endpoint command, as noted in the xHCI 0.95 errata.
1588  *
1589  *  3) The TD may have completed by the time the Stop Endpoint Command
1590  *     completes, so software needs to handle that case too.
1591  *
1592  * This function should protect against the TD enqueueing code ringing the
1593  * doorbell while this code is waiting for a Stop Endpoint command to
1594  * complete.  It also needs to account for multiple cancellations happening
1595  * at the same time for the same URB.  This should also protect against the
1596  * TD enqueueing code freeing the URB (which can happen fast!) while this
1597  * code is still trying to clean up.
1598  */
1599 static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1600 {
1601         unsigned long flags;
1602         int ret, i;
1603         u32 temp;
1604         struct xhci_hcd *xhci;
1605         struct urb_priv *urb_priv;
1606         struct xhci_td *td;
1607         unsigned int ep_index;
1608         struct xhci_ring *ep_ring;
1609         struct xhci_virt_ep *ep;
1610         struct xhci_command *command;
1611         struct xhci_virt_device *vdev;
1612 
1613         xhci = hcd_to_xhci(hcd);
1614         spin_lock_irqsave(&xhci->lock, flags);
1615 
1616         trace_xhci_urb_dequeue(urb);
1617 
1618         /* Make sure the URB hasn't completed or been unlinked already */
1619         ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1620         if (ret)
1621                 goto done;
1622 
1623         /* give back URB now if we can't queue it for cancel */
1624         vdev = xhci->devs[urb->dev->slot_id];
1625         urb_priv = urb->hcpriv;
1626         if (!vdev || !urb_priv)
1627                 goto err_giveback;
1628 
1629         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1630         ep = &vdev->eps[ep_index];
1631         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1632         if (!ep || !ep_ring)
1633                 goto err_giveback;
1634 
1635         /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
1636         temp = readl(&xhci->op_regs->status);
1637         if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1638                 xhci_hc_died(xhci);
1639                 goto done;
1640         }
1641 
1642         /*
1643          * Check that the ring was not re-allocated since the URB was
1644          * enqueued; if it was, the ring-related pointers in this URB's
1645          * private data (such as td_list) point at freed memory.
1646          */
1647         if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1648                 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1649                 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1650                         td = &urb_priv->td[i];
1651                         if (!list_empty(&td->cancelled_td_list))
1652                                 list_del_init(&td->cancelled_td_list);
1653                 }
1654                 goto err_giveback;
1655         }
1656 
1657         if (xhci->xhc_state & XHCI_STATE_HALTED) {
1658                 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1659                                 "HC halted, freeing TD manually.");
1660                 for (i = urb_priv->num_tds_done;
1661                      i < urb_priv->num_tds;
1662                      i++) {
1663                         td = &urb_priv->td[i];
1664                         if (!list_empty(&td->td_list))
1665                                 list_del_init(&td->td_list);
1666                         if (!list_empty(&td->cancelled_td_list))
1667                                 list_del_init(&td->cancelled_td_list);
1668                 }
1669                 goto err_giveback;
1670         }
1671 
1672         i = urb_priv->num_tds_done;
1673         if (i < urb_priv->num_tds)
1674                 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1675                                 "Cancel URB %p, dev %s, ep 0x%x, "
1676                                 "starting at offset 0x%llx",
1677                                 urb, urb->dev->devpath,
1678                                 urb->ep->desc.bEndpointAddress,
1679                                 (unsigned long long) xhci_trb_virt_to_dma(
1680                                         urb_priv->td[i].start_seg,
1681                                         urb_priv->td[i].first_trb));
1682 
1683         for (; i < urb_priv->num_tds; i++) {
1684                 td = &urb_priv->td[i];
1685                 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1686         }
1687 
1688         /* Queue a stop endpoint command, but only if this is
1689          * the first cancellation to be handled.
1690          */
1691         if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1692                 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1693                 if (!command) {
1694                         ret = -ENOMEM;
1695                         goto done;
1696                 }
1697                 ep->ep_state |= EP_STOP_CMD_PENDING;
1698                 ep->stop_cmd_timer.expires = jiffies +
1699                         XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1700                 add_timer(&ep->stop_cmd_timer);
1701                 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1702                                          ep_index, 0);
1703                 xhci_ring_cmd_db(xhci);
1704         }
1705 done:
1706         spin_unlock_irqrestore(&xhci->lock, flags);
1707         return ret;
1708 
1709 err_giveback:
1710         if (urb_priv)
1711                 xhci_urb_free_priv(urb_priv);
1712         usb_hcd_unlink_urb_from_ep(hcd, urb);
1713         spin_unlock_irqrestore(&xhci->lock, flags);
1714         usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1715         return ret;
1716 }
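
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two USB core entry points that end up in xhci_urb_dequeue() differ
 * only in how they wait:
 *
 *	usb_unlink_urb(urb);	// async: returns -EINPROGRESS, the URB
 *				// completes later with status -ECONNRESET
 *	usb_kill_urb(urb);	// sync: may sleep until the completion
 *				// handler has run with status -ENOENT
 *
 * Either way, the stop endpoint command queued above is what actually makes
 * the hardware let go of the TDs.
 */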
1717 
1718 /* Drop an endpoint from a new bandwidth configuration for this device.
1719  * Only one call to this function is allowed per endpoint before
1720  * check_bandwidth() or reset_bandwidth() must be called.
1721  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1722  * add the endpoint to the schedule with possibly new parameters denoted by a
1723  * different endpoint descriptor in usb_host_endpoint.
1724  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1725  * not allowed.
1726  *
1727  * The USB core will not allow URBs to be queued to an endpoint that is being
1728  * disabled, so there's no need for mutual exclusion to protect
1729  * the xhci->devs[slot_id] structure.
1730  */
1731 static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1732                 struct usb_host_endpoint *ep)
1733 {
1734         struct xhci_hcd *xhci;
1735         struct xhci_container_ctx *in_ctx, *out_ctx;
1736         struct xhci_input_control_ctx *ctrl_ctx;
1737         unsigned int ep_index;
1738         struct xhci_ep_ctx *ep_ctx;
1739         u32 drop_flag;
1740         u32 new_add_flags, new_drop_flags;
1741         int ret;
1742 
1743         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1744         if (ret <= 0)
1745                 return ret;
1746         xhci = hcd_to_xhci(hcd);
1747         if (xhci->xhc_state & XHCI_STATE_DYING)
1748                 return -ENODEV;
1749 
1750         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1751         drop_flag = xhci_get_endpoint_flag(&ep->desc);
1752         if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1753                 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1754                                 __func__, drop_flag);
1755                 return 0;
1756         }
1757 
1758         in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1759         out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1760         ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1761         if (!ctrl_ctx) {
1762                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1763                                 __func__);
1764                 return 0;
1765         }
1766 
1767         ep_index = xhci_get_endpoint_index(&ep->desc);
1768         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1769         /* If the HC already knows the endpoint is disabled,
1770          * or the HCD has noted it is disabled, ignore this request
1771          */
1772         if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1773             le32_to_cpu(ctrl_ctx->drop_flags) &
1774             xhci_get_endpoint_flag(&ep->desc)) {
1775                 /* Do not warn when called after a usb_device_reset */
1776                 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1777                         xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1778                                   __func__, ep);
1779                 return 0;
1780         }
1781 
1782         ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1783         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1784 
1785         ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1786         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1787 
1788         xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1789 
1790         xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1791 
1792         if (xhci->quirks & XHCI_MTK_HOST)
1793                 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1794 
1795         xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1796                         (unsigned int) ep->desc.bEndpointAddress,
1797                         udev->slot_id,
1798                         (unsigned int) new_drop_flags,
1799                         (unsigned int) new_add_flags);
1800         return 0;
1801 }
1802 
1803 /* Add an endpoint to a new possible bandwidth configuration for this device.
1804  * Only one call to this function is allowed per endpoint before
1805  * check_bandwidth() or reset_bandwidth() must be called.
1806  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1807  * add the endpoint to the schedule with possibly new parameters denoted by a
1808  * different endpoint descriptor in usb_host_endpoint.
1809  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1810  * not allowed.
1811  *
1812  * The USB core will not allow URBs to be queued to an endpoint until the
1813  * configuration or alt setting is installed in the device, so there's no need
1814  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1815  */
1816 static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1817                 struct usb_host_endpoint *ep)
1818 {
1819         struct xhci_hcd *xhci;
1820         struct xhci_container_ctx *in_ctx;
1821         unsigned int ep_index;
1822         struct xhci_input_control_ctx *ctrl_ctx;
1823         struct xhci_ep_ctx *ep_ctx;
1824         u32 added_ctxs;
1825         u32 new_add_flags, new_drop_flags;
1826         struct xhci_virt_device *virt_dev;
1827         int ret = 0;
1828 
1829         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1830         if (ret <= 0) {
1831                 /* So we won't queue a reset ep command for a root hub */
1832                 ep->hcpriv = NULL;
1833                 return ret;
1834         }
1835         xhci = hcd_to_xhci(hcd);
1836         if (xhci->xhc_state & XHCI_STATE_DYING)
1837                 return -ENODEV;
1838 
1839         added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1840         if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1841                 /* FIXME when we have to issue an evaluate endpoint command to
1842                  * deal with ep0 max packet size changing once we get the
1843                  * descriptors
1844                  */
1845                 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1846                                 __func__, added_ctxs);
1847                 return 0;
1848         }
1849 
1850         virt_dev = xhci->devs[udev->slot_id];
1851         in_ctx = virt_dev->in_ctx;
1852         ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1853         if (!ctrl_ctx) {
1854                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1855                                 __func__);
1856                 return 0;
1857         }
1858 
1859         ep_index = xhci_get_endpoint_index(&ep->desc);
1860         /* If this endpoint is already in use, and the upper layers are trying
1861          * to add it again without dropping it, reject the addition.
1862          */
1863         if (virt_dev->eps[ep_index].ring &&
1864                         !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1865                 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1866                                 "without dropping it.\n",
1867                                 (unsigned int) ep->desc.bEndpointAddress);
1868                 return -EINVAL;
1869         }
1870 
1871         /* If the HCD has already noted the endpoint is enabled,
1872          * ignore this request.
1873          */
1874         if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1875                 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1876                                 __func__, ep);
1877                 return 0;
1878         }
1879 
1880         /*
1881          * Configuration and alternate setting changes must be done in
1882          * process context, not interrupt context (or so documentation
1883          * for usb_set_interface() and usb_set_configuration() claim).
1884          */
1885         if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1886                 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1887                                 __func__, ep->desc.bEndpointAddress);
1888                 return -ENOMEM;
1889         }
1890 
1891         if (xhci->quirks & XHCI_MTK_HOST) {
1892                 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1893                 if (ret < 0) {
1894                         xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1895                         virt_dev->eps[ep_index].new_ring = NULL;
1896                         return ret;
1897                 }
1898         }
1899 
1900         ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1901         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1902 
1903         /* If xhci_endpoint_disable() was called for this endpoint, but the
1904          * xHC hasn't been notified yet through the check_bandwidth() call,
1905          * this re-adds a new state for the endpoint from the new endpoint
1906          * descriptors.  We must drop and re-add this endpoint, so we leave
1907          * the drop flags alone.
1908          */
1909         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1910 
1911         /* Store the usb_device pointer for later use */
1912         ep->hcpriv = udev;
1913 
1914         ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1915         trace_xhci_add_endpoint(ep_ctx);
1916 
1917         xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1918 
1919         xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1920                         (unsigned int) ep->desc.bEndpointAddress,
1921                         udev->slot_id,
1922                         (unsigned int) new_drop_flags,
1923                         (unsigned int) new_add_flags);
1924         return 0;
1925 }
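
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * drop/add come in pairs driven by the USB core whenever a configuration or
 * alternate setting changes, e.g.:
 *
 *	ret = usb_set_interface(udev, ifnum, alt);
 *
 * makes the core call hcd->driver->drop_endpoint() for every endpoint of the
 * old alt setting, hcd->driver->add_endpoint() for every endpoint of the new
 * one, and finally hcd->driver->check_bandwidth() to commit the accumulated
 * input context in a single configure endpoint command.
 */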
1926 
1927 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1928 {
1929         struct xhci_input_control_ctx *ctrl_ctx;
1930         struct xhci_ep_ctx *ep_ctx;
1931         struct xhci_slot_ctx *slot_ctx;
1932         int i;
1933 
1934         ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1935         if (!ctrl_ctx) {
1936                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1937                                 __func__);
1938                 return;
1939         }
1940 
1941         /* When a device's add flag and drop flag are zero, any subsequent
1942          * configure endpoint command will leave that endpoint's state
1943          * untouched.  Make sure we don't leave any old state in the input
1944          * endpoint contexts.
1945          */
1946         ctrl_ctx->drop_flags = 0;
1947         ctrl_ctx->add_flags = 0;
1948         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1949         slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1950         /* Endpoint 0 is always valid */
1951         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1952         for (i = 1; i < 31; i++) {
1953                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1954                 ep_ctx->ep_info = 0;
1955                 ep_ctx->ep_info2 = 0;
1956                 ep_ctx->deq = 0;
1957                 ep_ctx->tx_info = 0;
1958         }
1959 }
1960 
1961 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1962                 struct usb_device *udev, u32 *cmd_status)
1963 {
1964         int ret;
1965 
1966         switch (*cmd_status) {
1967         case COMP_COMMAND_ABORTED:
1968         case COMP_COMMAND_RING_STOPPED:
1969                 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1970                 ret = -ETIME;
1971                 break;
1972         case COMP_RESOURCE_ERROR:
1973                 dev_warn(&udev->dev,
1974                          "Not enough host controller resources for new device state.\n");
1975                 ret = -ENOMEM;
1976                 /* FIXME: can we allocate more resources for the HC? */
1977                 break;
1978         case COMP_BANDWIDTH_ERROR:
1979         case COMP_SECONDARY_BANDWIDTH_ERROR:
1980                 dev_warn(&udev->dev,
1981                          "Not enough bandwidth for new device state.\n");
1982                 ret = -ENOSPC;
1983                 /* FIXME: can we go back to the old state? */
1984                 break;
1985         case COMP_TRB_ERROR:
1986                 /* the HCD set up something wrong */
1987                 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1988                                 "add flag = 1, "
1989                                 "and endpoint is not disabled.\n");
1990                 ret = -EINVAL;
1991                 break;
1992         case COMP_INCOMPATIBLE_DEVICE_ERROR:
1993                 dev_warn(&udev->dev,
1994                          "ERROR: Incompatible device for endpoint configure command.\n");
1995                 ret = -ENODEV;
1996                 break;
1997         case COMP_SUCCESS:
1998                 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1999                                 "Successful Endpoint Configure command");
2000                 ret = 0;
2001                 break;
2002         default:
2003                 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2004                                 *cmd_status);
2005                 ret = -EINVAL;
2006                 break;
2007         }
2008         return ret;
2009 }
2010 
2011 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2012                 struct usb_device *udev, u32 *cmd_status)
2013 {
2014         int ret;
2015 
2016         switch (*cmd_status) {
2017         case COMP_COMMAND_ABORTED:
2018         case COMP_COMMAND_RING_STOPPED:
2019                 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2020                 ret = -ETIME;
2021                 break;
2022         case COMP_PARAMETER_ERROR:
2023                 dev_warn(&udev->dev,
2024                          "WARN: xHCI driver setup invalid evaluate context command.\n");
2025                 ret = -EINVAL;
2026                 break;
2027         case COMP_SLOT_NOT_ENABLED_ERROR:
2028                 dev_warn(&udev->dev,
2029                         "WARN: slot not enabled for evaluate context command.\n");
2030                 ret = -EINVAL;
2031                 break;
2032         case COMP_CONTEXT_STATE_ERROR:
2033                 dev_warn(&udev->dev,
2034                         "WARN: invalid context state for evaluate context command.\n");
2035                 ret = -EINVAL;
2036                 break;
2037         case COMP_INCOMPATIBLE_DEVICE_ERROR:
2038                 dev_warn(&udev->dev,
2039                         "ERROR: Incompatible device for evaluate context command.\n");
2040                 ret = -ENODEV;
2041                 break;
2042         case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2043                 /* Max Exit Latency too large error */
2044                 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2045                 ret = -EINVAL;
2046                 break;
2047         case COMP_SUCCESS:
2048                 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2049                                 "Successful evaluate context command");
2050                 ret = 0;
2051                 break;
2052         default:
2053                 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2054                         *cmd_status);
2055                 ret = -EINVAL;
2056                 break;
2057         }
2058         return ret;
2059 }
2060 
2061 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2062                 struct xhci_input_control_ctx *ctrl_ctx)
2063 {
2064         u32 valid_add_flags;
2065         u32 valid_drop_flags;
2066 
2067         /* Ignore the slot flag (bit 0), and the default control endpoint flag
2068          * (bit 1).  The default control endpoint is added during the Address
2069          * Device command and is never removed until the slot is disabled.
2070          */
2071         valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2072         valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2073 
2074         /* Use hweight32 to count the ones in the add flags, i.e. the number
2075          * of endpoints added.  Don't count endpoints that are changed (both
2076          * added and dropped).
2077          */
2078         return hweight32(valid_add_flags) -
2079                 hweight32(valid_add_flags & valid_drop_flags);
2080 }
2081 
2082 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2083                 struct xhci_input_control_ctx *ctrl_ctx)
2084 {
2085         u32 valid_add_flags;
2086         u32 valid_drop_flags;
2087 
2088         valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2089         valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2090 
2091         return hweight32(valid_drop_flags) -
2092                 hweight32(valid_add_flags & valid_drop_flags);
2093 }
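
/*
 * Worked example (editor's illustration): suppose one endpoint is changed
 * (dropped and re-added with new parameters) and a second one is newly
 * added.  After the ">> 2" shift that discards the slot and ep0 flags:
 *
 *	valid_add_flags  = 0b0110	// two endpoints added
 *	valid_drop_flags = 0b0010	// one of them was also dropped
 *
 *	new endpoints     = hweight32(0b0110) - hweight32(0b0110 & 0b0010) = 1
 *	dropped endpoints = hweight32(0b0010) - hweight32(0b0110 & 0b0010) = 0
 *
 * A changed endpoint is counted neither as new nor as dropped, so it consumes
 * no additional host controller resources.
 */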
2094 
2095 /*
2096  * We need to reserve the new number of endpoints before the configure endpoint
2097  * command completes.  We can't subtract the dropped endpoints from the number
2098  * of active endpoints until the command completes, because we could otherwise
2099  * oversubscribe the host in this case:
2100  *
2101  *  - the first configure endpoint command drops more endpoints than it adds,
2102  *  - a second configure endpoint command that adds more endpoints is queued,
2103  *  - the first configure endpoint command fails, so the endpoints it dropped
2104  *    are never actually freed.
2105  *
2106  * The dropped endpoints are subtracted in xhci_finish_resource_reservation().
2107  */
2108 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2109                 struct xhci_input_control_ctx *ctrl_ctx)
2110 {
2111         u32 added_eps;
2112 
2113         added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2114         if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2115                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2116                                 "Not enough ep ctxs: "
2117                                 "%u active, need to add %u, limit is %u.",
2118                                 xhci->num_active_eps, added_eps,
2119                                 xhci->limit_active_eps);
2120                 return -ENOMEM;
2121         }
2122         xhci->num_active_eps += added_eps;
2123         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2124                         "Adding %u ep ctxs, %u now active.", added_eps,
2125                         xhci->num_active_eps);
2126         return 0;
2127 }
2128 
2129 /*
2130  * The configure endpoint command failed, so the host controller resources
2131  * that were reserved for the new endpoints must be given back.
2132  *
2133  * Must be called with xhci->lock held.
2134  */
2135 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2136                 struct xhci_input_control_ctx *ctrl_ctx)
2137 {
2138         u32 num_failed_eps;
2139 
2140         num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2141         xhci->num_active_eps -= num_failed_eps;
2142         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2143                         "Removing %u failed ep ctxs, %u now active.",
2144                         num_failed_eps,
2145                         xhci->num_active_eps);
2146 }
2147 
2148 /* Now that the command has completed, clean up the host controller resources
2149  * for the endpoints that were dropped: subtract the number of dropped
2150  * endpoints from the number of active endpoints.
2151  *
2152  * Must be called with xhci->lock held.
2153  */
2154 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2155                 struct xhci_input_control_ctx *ctrl_ctx)
2156 {
2157         u32 num_dropped_eps;
2158 
2159         num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2160         xhci->num_active_eps -= num_dropped_eps;
2161         if (num_dropped_eps)
2162                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2163                                 "Removing %u dropped ep ctxs, %u now active.",
2164                                 num_dropped_eps,
2165                                 xhci->num_active_eps);
2166 }
2167 
2168 static unsigned int xhci_get_block_size(struct usb_device *udev)
2169 {
2170         switch (udev->speed) {
2171         case USB_SPEED_LOW:
2172         case USB_SPEED_FULL:
2173                 return FS_BLOCK;
2174         case USB_SPEED_HIGH:
2175                 return HS_BLOCK;
2176         case USB_SPEED_SUPER:
2177         case USB_SPEED_SUPER_PLUS:
2178                 return SS_BLOCK;
2179         case USB_SPEED_UNKNOWN:
2180         case USB_SPEED_WIRELESS:
2181         default:
2182                 /* Should never happen */
2183                 return 1;
2184         }
2185 }
2186 
2187 static unsigned int
2188 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2189 {
2190         if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2191                 return LS_OVERHEAD;
2192         if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2193                 return FS_OVERHEAD;
2194         return HS_OVERHEAD;
2195 }
2196 
2197 /*
2198  * If we are manipulating a LS/FS device under a HS hub, double check that
2199  * the HS bus has enough bandwidth if we are activating a new TT.
2200  */
2201 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2202                 struct xhci_virt_device *virt_dev,
2203                 int old_active_eps)
2204 {
2205         struct xhci_interval_bw_table *bw_table;
2206         struct xhci_tt_bw_info *tt_info;
2207 
2208         /* Find the bandwidth table for the root port this TT is attached to */
2209         bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2210         tt_info = virt_dev->tt_info;
2211         /* If this TT already had active endpoints, the bandwidth for this TT
2212          * has already been added.  Removing all periodic endpoints (and thus
2213          * making the TT inactive) will only decrease the bandwidth used.
2214          */
2215         if (old_active_eps)
2216                 return 0;
2217         if (old_active_eps == 0 && tt_info->active_eps != 0) {
2218                 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2219                         return -ENOMEM;
2220                 return 0;
2221         }
2222         /* Not sure why we would have no new active endpoints...
2223          *
2224          * Maybe because of an Evaluate Context change for a hub update or a
2225          * control endpoint 0 max packet size change?
2226          * FIXME: skip the bandwidth calculation in that case.
2227          */
2228         return 0;
2229 }
2230 
2231 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2232                 struct xhci_virt_device *virt_dev)
2233 {
2234         unsigned int bw_reserved;
2235 
2236         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2237         if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2238                 return -ENOMEM;
2239 
2240         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2241         if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2242                 return -ENOMEM;
2243 
2244         return 0;
2245 }
2246 
2247 /*
2248  * This algorithm is a very conservative estimate of the worst-case scheduling
2249  * scenario for any one interval.  The hardware dynamically schedules the
2250  * packets, so we can't tell which microframe could be the limiting factor in
2251  * the bandwidth scheduling.  This only takes into account periodic endpoints.
2252  *
2253  * Obviously, we can't solve an NP complete problem to find the minimum worst
2254  * case scenario.  Instead, we come up with an estimate that is no less than
2255  * the worst case bandwidth used for any one microframe, but may be an
2256  * over-estimate.
2257  *
2258  * We walk the requirements starting at the smallest interval, and place
2259  * packets in the schedule where there is only one host controller packet per
2260  * microframe.
2261  *
2262  * Packets that cannot be scheduled that way are "carried over" to the next
2263  * larger interval: the number of left-over packets is doubled each time the
2264  * interval doubles, modelling the worst case where all of the carried-over
2265  * packets collide in the same microframes as the packets of the larger
2266  * interval.
2267  *
2268  * At each interval, every scheduled packet is charged with the largest max
2269  * packet size and the largest per-packet overhead seen at this or any
2270  * smaller interval with left-over packets, so mixing endpoint speeds and
2271  * types only makes the estimate more conservative.
2272  *
2273  * Any packets still remaining after interval 15 are assumed to land in a
2274  * single microframe, and one more worst-case packet is charged for them.
2275  *
2276  * A percentage of the bus bandwidth is reserved for non-periodic transfers
2277  * (FS_BW_RESERVED or HS_BW_RESERVED percent) and is added in at the end.
2278  *
2279  * For a root port shared with LS/FS devices behind TTs, TT_HS_OVERHEAD is
2280  * also charged once per active TT.
2281  *
2282  * SuperSpeed devices are checked separately in xhci_check_ss_bw(), since the
2283  * xHC hardware does its own SS bandwidth scheduling.
2284  *
2285  * Returns 0 if the new configuration fits, and -ENOMEM if it does not.
2286  */
2287 
2288 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2289                 struct xhci_virt_device *virt_dev,
2290                 int old_active_eps)
2291 {
2292         unsigned int bw_reserved;
2293         unsigned int max_bandwidth;
2294         unsigned int bw_used;
2295         unsigned int block_size;
2296         struct xhci_interval_bw_table *bw_table;
2297         unsigned int packet_size = 0;
2298         unsigned int overhead = 0;
2299         unsigned int packets_transmitted = 0;
2300         unsigned int packets_remaining = 0;
2301         unsigned int i;
2302 
2303         if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2304                 return xhci_check_ss_bw(xhci, virt_dev);
2305 
2306         if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2307                 max_bandwidth = HS_BW_LIMIT;
2308                 /* Convert percent of bus BW reserved to blocks reserved */
2309                 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2310         } else {
2311                 max_bandwidth = FS_BW_LIMIT;
2312                 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2313         }
2314 
2315         bw_table = virt_dev->bw_table;
2316         /* We need to translate the max packet size and max ESIT payloads into
2317          * the units the hardware uses.
2318          */
2319         block_size = xhci_get_block_size(virt_dev->udev);
2320 
2321         /* If we are manipulating a LS/FS device under a HS hub, double check
2322          * that the HS bus has enough bandwidth if we are activating a new TT.
2323          */
2324         if (virt_dev->tt_info) {
2325                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2326                                 "Recalculating BW for rootport %u",
2327                                 virt_dev->real_port);
2328                 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2329                         xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2330                                         "newly activated TT.\n");
2331                         return -ENOMEM;
2332                 }
2333                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2334                                 "Recalculating BW for TT slot %u port %u",
2335                                 virt_dev->tt_info->slot_id,
2336                                 virt_dev->tt_info->ttport);
2337         } else {
2338                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2339                                 "Recalculating BW for rootport %u",
2340                                 virt_dev->real_port);
2341         }
2342 
2343         /* Add in how much bandwidth will be used for interval zero, or the
2344          * rounded max ESIT payload + number of packets * largest overhead.
2345          */
2346         bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2347                 bw_table->interval_bw[0].num_packets *
2348                 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2349 
2350         for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2351                 unsigned int bw_added;
2352                 unsigned int largest_mps;
2353                 unsigned int interval_overhead;
2354 
2355                 /*
2356                  * How many packets could we transmit in this interval?
2357                  * If packets didn't fit in the previous interval, we will need
2358                  * to transmit that many packets twice within this interval.
2359                  */
2360                 packets_remaining = 2 * packets_remaining +
2361                         bw_table->interval_bw[i].num_packets;
2362 
2363                 /* Find the largest max packet size of this or the previous
2364                  * interval.
2365                  */
2366                 if (list_empty(&bw_table->interval_bw[i].endpoints))
2367                         largest_mps = 0;
2368                 else {
2369                         struct xhci_virt_ep *virt_ep;
2370                         struct list_head *ep_entry;
2371 
2372                         ep_entry = bw_table->interval_bw[i].endpoints.next;
2373                         virt_ep = list_entry(ep_entry,
2374                                         struct xhci_virt_ep, bw_endpoint_list);
2375                         /* Convert to blocks, rounding up */
2376                         largest_mps = DIV_ROUND_UP(
2377                                         virt_ep->bw_info.max_packet_size,
2378                                         block_size);
2379                 }
2380                 if (largest_mps > packet_size)
2381                         packet_size = largest_mps;
2382 
2383                 /* Find the largest overhead of this or the previous interval */
2384                 interval_overhead = xhci_get_largest_overhead(
2385                                 &bw_table->interval_bw[i]);
2386                 if (interval_overhead > overhead)
2387                         overhead = interval_overhead;
2388 
2389                 /* How many packets can we evenly distribute across
2390                  * (1 << (i + 1)) possible scheduling opportunities?
2391                  */
2392                 packets_transmitted = packets_remaining >> (i + 1);
2393 
2394                 /* Add in the bandwidth used for those scheduled packets */
2395                 bw_added = packets_transmitted * (overhead + packet_size);
2396 
2397                 /* How many packets are left over after this interval? */
2398                 packets_remaining = packets_remaining % (1 << (i + 1));
2399 
2400                 /* If we've transmitted all packets, don't carry over the
2401                  * largest packet size.
2402                  */
2403 
2404                 if (packets_remaining == 0) {
2405                         packet_size = 0;
2406                         overhead = 0;
2407                 } else if (packets_transmitted > 0) {
2408                         /* Carry over packet_size and overhead if some
2409                          * packets could not be scheduled evenly, since the
2410                          * left-over packets may collide with the next
2411                          * interval's packets in the same microframe.
2412                          */
2413                         packet_size = largest_mps;
2414                         overhead = interval_overhead;
2415                 }
2416                 /* Add the bandwidth for this interval to the running total
2417                  * and fail as soon as the budget is exceeded.
2418                  */
2419                 bw_used += bw_added;
2420                 if (bw_used > max_bandwidth) {
2421                         xhci_warn(xhci, "Not enough bandwidth. "
2422                                         "Proposed: %u, Max: %u\n",
2423                                 bw_used, max_bandwidth);
2424                         return -ENOMEM;
2425                 }
2426         }
2427         /*
2428          * Ok, we know we have some packets left over after even-handedly
2429          * scheduling interval 15.  We don't know which microframes they will
2430          * fit into, so worst case, schedule them all in the same microframe
2431          * with the largest overhead and packet size.
2432          */
2433         if (packets_remaining > 0)
2434                 bw_used += overhead + packet_size;
2435 
2436         if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2437                 unsigned int port_index = virt_dev->real_port - 1;
2438 
2439                 /* OK, we're manipulating a HS device attached to a root port
2440                  * bandwidth domain.  Include the number of active TTs in the
2441                  * bandwidth used.
2442                  */
2443                 bw_used += TT_HS_OVERHEAD *
2444                         xhci->rh_bw[port_index].num_active_tts;
2445         }
2446 
2447         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2448                 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2449                 "Available: %u percent",
2450                 bw_used, max_bandwidth, bw_reserved,
2451                 (max_bandwidth - bw_used - bw_reserved) * 100 /
2452                 max_bandwidth);
2453 
2454         bw_used += bw_reserved;
2455         if (bw_used > max_bandwidth) {
2456                 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2457                                 bw_used, max_bandwidth);
2458                 return -ENOMEM;
2459         }
2460 
2461         bw_table->bw_used = bw_used;
2462         return 0;
2463 }
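
/*
 * Worked example (editor's illustration) of the carry-over loop above:
 * suppose only interval_bw[1] has packets, say 5 of them.
 *
 *	i = 1: packets_remaining = 2 * 0 + 5 = 5
 *	       packets_transmitted = 5 >> 2 = 1	 // one per 4-uframe window
 *	       bw_added = 1 * (overhead + packet_size)
 *	       packets_remaining = 5 % 4 = 1	 // carried over
 *	i = 2: packets_remaining = 2 * 1 + 0 = 2
 *	       packets_transmitted = 2 >> 3 = 0, so nothing is charged yet
 *	...
 *
 * Anything still remaining after i = 15 is charged once more as a single
 * worst-case packet (the "packets_remaining > 0" adjustment above).
 */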
2464 
2465 static bool xhci_is_async_ep(unsigned int ep_type)
2466 {
2467         return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2468                                         ep_type != ISOC_IN_EP &&
2469                                         ep_type != INT_IN_EP);
2470 }
2471 
2472 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2473 {
2474         return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2475 }
2476 
2477 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2478 {
2479         unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2480 
2481         if (ep_bw->ep_interval == 0)
2482                 return SS_OVERHEAD_BURST +
2483                         (ep_bw->mult * ep_bw->num_packets *
2484                                         (SS_OVERHEAD + mps));
2485         return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2486                                 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2487                                 1 << ep_bw->ep_interval);
2488 
2489 }
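
/*
 * Editor's note (illustration; the constants are defined in xhci.h): an
 * endpoint serviced every microframe (ep_interval == 0) is charged the full
 * worst case per microframe:
 *
 *	mps = DIV_ROUND_UP(max_packet_size, SS_BLOCK);
 *	bw  = SS_OVERHEAD_BURST + mult * num_packets * (SS_OVERHEAD + mps);
 *
 * while endpoints with a longer interval spread the same cost (plus the
 * burst overhead) across the 2^ep_interval microframes of their interval.
 */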
2490 
2491 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2492                 struct xhci_bw_info *ep_bw,
2493                 struct xhci_interval_bw_table *bw_table,
2494                 struct usb_device *udev,
2495                 struct xhci_virt_ep *virt_ep,
2496                 struct xhci_tt_bw_info *tt_info)
2497 {
2498         struct xhci_interval_bw *interval_bw;
2499         int normalized_interval;
2500 
2501         if (xhci_is_async_ep(ep_bw->type))
2502                 return;
2503 
2504         if (udev->speed >= USB_SPEED_SUPER) {
2505                 if (xhci_is_sync_in_ep(ep_bw->type))
2506                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2507                                 xhci_get_ss_bw_consumed(ep_bw);
2508                 else
2509                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2510                                 xhci_get_ss_bw_consumed(ep_bw);
2511                 return;
2512         }
2513 
2514         /* SuperSpeed endpoints never get added to the interval table, so
2515          * this check is only valid for LS/FS/HS endpoints.
2516          */
2517         if (list_empty(&virt_ep->bw_endpoint_list))
2518                 return;
2519         /* For LS/FS devices, we need to translate the interval expressed in
2520          * microframes to frames.
2521          */
2522         if (udev->speed == USB_SPEED_HIGH)
2523                 normalized_interval = ep_bw->ep_interval;
2524         else
2525                 normalized_interval = ep_bw->ep_interval - 3;
2526 
2527         if (normalized_interval == 0)
2528                 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2529         interval_bw = &bw_table->interval_bw[normalized_interval];
2530         interval_bw->num_packets -= ep_bw->num_packets;
2531         switch (udev->speed) {
2532         case USB_SPEED_LOW:
2533                 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2534                 break;
2535         case USB_SPEED_FULL:
2536                 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2537                 break;
2538         case USB_SPEED_HIGH:
2539                 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2540                 break;
2541         case USB_SPEED_SUPER:
2542         case USB_SPEED_SUPER_PLUS:
2543         case USB_SPEED_UNKNOWN:
2544         case USB_SPEED_WIRELESS:
2545                 /* Should never happen because only LS/FS/HS endpoints will get
2546                  * added to the interval table.
2547                  */
2548                 return;
2549         }
2550         if (tt_info)
2551                 tt_info->active_eps -= 1;
2552         list_del_init(&virt_ep->bw_endpoint_list);
2553 }
2554 
2555 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2556                 struct xhci_bw_info *ep_bw,
2557                 struct xhci_interval_bw_table *bw_table,
2558                 struct usb_device *udev,
2559                 struct xhci_virt_ep *virt_ep,
2560                 struct xhci_tt_bw_info *tt_info)
2561 {
2562         struct xhci_interval_bw *interval_bw;
2563         struct xhci_virt_ep *smaller_ep;
2564         int normalized_interval;
2565 
2566         if (xhci_is_async_ep(ep_bw->type))
2567                 return;
2568 
2569         if (udev->speed >= USB_SPEED_SUPER) {
2570                 if (xhci_is_sync_in_ep(ep_bw->type))
2571                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2572                                 xhci_get_ss_bw_consumed(ep_bw);
2573                 else
2574                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2575                                 xhci_get_ss_bw_consumed(ep_bw);
2576                 return;
2577         }
2578 
2579         /* For LS/FS devices, we need to translate the interval expressed in
2580          * microframes to frames.
2581          */
2582         if (udev->speed == USB_SPEED_HIGH)
2583                 normalized_interval = ep_bw->ep_interval;
2584         else
2585                 normalized_interval = ep_bw->ep_interval - 3;
2586 
2587         if (normalized_interval == 0)
2588                 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2589         interval_bw = &bw_table->interval_bw[normalized_interval];
2590         interval_bw->num_packets += ep_bw->num_packets;
2591         switch (udev->speed) {
2592         case USB_SPEED_LOW:
2593                 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2594                 break;
2595         case USB_SPEED_FULL:
2596                 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2597                 break;
2598         case USB_SPEED_HIGH:
2599                 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2600                 break;
2601         case USB_SPEED_SUPER:
2602         case USB_SPEED_SUPER_PLUS:
2603         case USB_SPEED_UNKNOWN:
2604         case USB_SPEED_WIRELESS:
2605                 /* Should never happen because only LS/FS/HS endpoints will get
2606                  * added to the interval table.
2607                  */
2608                 return;
2609         }
2610 
2611         if (tt_info)
2612                 tt_info->active_eps += 1;
2613         /* Insert the endpoint into the list, largest max packet size first */
2614         list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2615                         bw_endpoint_list) {
2616                 if (ep_bw->max_packet_size >=
2617                                 smaller_ep->bw_info.max_packet_size) {
2618                         /* Add the new ep before the smaller endpoint */
2619                         list_add_tail(&virt_ep->bw_endpoint_list,
2620                                         &smaller_ep->bw_endpoint_list);
2621                         return;
2622                 }
2623         }
2624         /* Add the endpoint at the end of the list */
2625         list_add_tail(&virt_ep->bw_endpoint_list,
2626                         &interval_bw->endpoints);
2627 }
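
/*
 * Editor's note (illustration): ep_interval here is the xHCI exponent, i.e.
 * the endpoint is serviced every 2^ep_interval microframes.  The LS/FS
 * "- 3" converts that to a frame-based index for the shared table:
 *
 *	FS interrupt ep, every 1 frame (8 uframes)  -> ep_interval 3 -> index 0
 *	FS interrupt ep, every 8 frames (64 uframes) -> ep_interval 6 -> index 3
 *
 * The endpoint list is kept sorted by max packet size (largest first) so
 * that xhci_check_bw_table() can read the worst case from the list head.
 */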
2628 
2629 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2630                 struct xhci_virt_device *virt_dev,
2631                 int old_active_eps)
2632 {
2633         struct xhci_root_port_bw_info *rh_bw_info;
2634         if (!virt_dev->tt_info)
2635                 return;
2636 
2637         rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2638         if (old_active_eps == 0 &&
2639                                 virt_dev->tt_info->active_eps != 0) {
2640                 rh_bw_info->num_active_tts += 1;
2641                 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2642         } else if (old_active_eps != 0 &&
2643                                 virt_dev->tt_info->active_eps == 0) {
2644                 rh_bw_info->num_active_tts -= 1;
2645                 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2646         }
2647 }
2648 
2649 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2650                 struct xhci_virt_device *virt_dev,
2651                 struct xhci_container_ctx *in_ctx)
2652 {
2653         struct xhci_bw_info ep_bw_info[31];
2654         int i;
2655         struct xhci_input_control_ctx *ctrl_ctx;
2656         int old_active_eps = 0;
2657 
2658         if (virt_dev->tt_info)
2659                 old_active_eps = virt_dev->tt_info->active_eps;
2660 
2661         ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2662         if (!ctrl_ctx) {
2663                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2664                                 __func__);
2665                 return -ENOMEM;
2666         }
2667 
2668         for (i = 0; i < 31; i++) {
2669                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2670                         continue;
2671 
2672                 /* Make a copy of the BW info in case we need to revert this */
2673                 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2674                                 sizeof(ep_bw_info[i]));
2675                 /* Drop the endpoint from the interval table if the endpoint is
2676                  * being dropped or changed.
2677                  */
2678                 if (EP_IS_DROPPED(ctrl_ctx, i))
2679                         xhci_drop_ep_from_interval_table(xhci,
2680                                         &virt_dev->eps[i].bw_info,
2681                                         virt_dev->bw_table,
2682                                         virt_dev->udev,
2683                                         &virt_dev->eps[i],
2684                                         virt_dev->tt_info);
2685         }
2686         /* Overwrite the information stored in the endpoints' bw_info */
2687         xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2688         for (i = 0; i < 31; i++) {
2689                 /* Add any changed or added endpoints to the interval table */
2690                 if (EP_IS_ADDED(ctrl_ctx, i))
2691                         xhci_add_ep_to_interval_table(xhci,
2692                                         &virt_dev->eps[i].bw_info,
2693                                         virt_dev->bw_table,
2694                                         virt_dev->udev,
2695                                         &virt_dev->eps[i],
2696                                         virt_dev->tt_info);
2697         }
2698 
2699         if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2700                 /* Ok, this fits in the bandwidth we have.
2701                  * Update the number of active TTs.
2702                  */
2703                 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2704                 return 0;
2705         }
2706 
2707         /* We don't have enough bandwidth for this, revert the stored info */
2708         for (i = 0; i < 31; i++) {
2709                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2710                         continue;
2711 
2712                 /* Drop the new copies of any added or changed endpoints from
2713                  * the interval table.
2714                  */
2715                 if (EP_IS_ADDED(ctrl_ctx, i)) {
2716                         xhci_drop_ep_from_interval_table(xhci,
2717                                         &virt_dev->eps[i].bw_info,
2718                                         virt_dev->bw_table,
2719                                         virt_dev->udev,
2720                                         &virt_dev->eps[i],
2721                                         virt_dev->tt_info);
2722                 }
2723                 /* Revert the endpoint back to its old information */
2724                 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2725                                 sizeof(ep_bw_info[i]));
2726                 /* Add any changed or dropped endpoints back into the table */
2727                 if (EP_IS_DROPPED(ctrl_ctx, i))
2728                         xhci_add_ep_to_interval_table(xhci,
2729                                         &virt_dev->eps[i].bw_info,
2730                                         virt_dev->bw_table,
2731                                         virt_dev->udev,
2732                                         &virt_dev->eps[i],
2733                                         virt_dev->tt_info);
2734         }
2735         return -ENOMEM;
2736 }
2737 
2738 /*
2739  * Issue a configure endpoint command or evaluate context command
2740  * and wait for it to finish.
2741  */
2742 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2743                 struct usb_device *udev,
2744                 struct xhci_command *command,
2745                 bool ctx_change, bool must_succeed)
2746 {
2747         int ret;
2748         unsigned long flags;
2749         struct xhci_input_control_ctx *ctrl_ctx;
2750         struct xhci_virt_device *virt_dev;
2751         struct xhci_slot_ctx *slot_ctx;
2752 
2753         if (!command)
2754                 return -EINVAL;
2755 
2756         spin_lock_irqsave(&xhci->lock, flags);
2757 
2758         if (xhci->xhc_state & XHCI_STATE_DYING) {
2759                 spin_unlock_irqrestore(&xhci->lock, flags);
2760                 return -ESHUTDOWN;
2761         }
2762 
2763         virt_dev = xhci->devs[udev->slot_id];
2764 
2765         ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2766         if (!ctrl_ctx) {
2767                 spin_unlock_irqrestore(&xhci->lock, flags);
2768                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2769                                 __func__);
2770                 return -ENOMEM;
2771         }
2772 
2773         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2774                         xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2775                 spin_unlock_irqrestore(&xhci->lock, flags);
2776                 xhci_warn(xhci, "Not enough host resources, "
2777                                 "active endpoint contexts = %u\n",
2778                                 xhci->num_active_eps);
2779                 return -ENOMEM;
2780         }
2781         if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2782             xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2783                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2784                         xhci_free_host_resources(xhci, ctrl_ctx);
2785                 spin_unlock_irqrestore(&xhci->lock, flags);
2786                 xhci_warn(xhci, "Not enough bandwidth\n");
2787                 return -ENOMEM;
2788         }
2789 
2790         slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2791 
2792         trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2793         trace_xhci_configure_endpoint(slot_ctx);
2794 
2795         if (!ctx_change)
2796                 ret = xhci_queue_configure_endpoint(xhci, command,
2797                                 command->in_ctx->dma,
2798                                 udev->slot_id, must_succeed);
2799         else
2800                 ret = xhci_queue_evaluate_context(xhci, command,
2801                                 command->in_ctx->dma,
2802                                 udev->slot_id, must_succeed);
2803         if (ret < 0) {
2804                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2805                         xhci_free_host_resources(xhci, ctrl_ctx);
2806                 spin_unlock_irqrestore(&xhci->lock, flags);
2807                 xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
2808                                 "FIXME allocate a new ring segment");
2809                 return -ENOMEM;
2810         }
2811         xhci_ring_cmd_db(xhci);
2812         spin_unlock_irqrestore(&xhci->lock, flags);
2813 
2814         /* Wait for the configure endpoint command to complete */
2815         wait_for_completion(command->completion);
2816 
2817         if (!ctx_change)
2818                 ret = xhci_configure_endpoint_result(xhci, udev,
2819                                                      &command->status);
2820         else
2821                 ret = xhci_evaluate_context_result(xhci, udev,
2822                                                    &command->status);
2823 
2824         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2825                 spin_lock_irqsave(&xhci->lock, flags);
2826                 /* If the command failed, remove the reserved resources.
2827                  * Otherwise, clean up the estimate to include dropped eps.
2828                  */
2829                 if (ret)
2830                         xhci_free_host_resources(xhci, ctrl_ctx);
2831                 else
2832                         xhci_finish_resource_reservation(xhci, ctrl_ctx);
2833                 spin_unlock_irqrestore(&xhci->lock, flags);
2834         }
2835         return ret;
2836 }
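
/*
 * Illustrative sketch (editor's addition, not part of the original file) of
 * a caller inside this driver; the command carries its own input context
 * and completion:
 *
 *	struct xhci_command *cmd;
 *
 *	cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	// ... fill cmd->in_ctx add/drop flags and endpoint contexts ...
 *	ret = xhci_configure_endpoint(xhci, udev, cmd, false, false);
 *	xhci_free_command(xhci, cmd);
 *
 * The function queues the command, rings the command ring doorbell, sleeps
 * on cmd->completion, and translates the completion code with the two
 * xhci_*_result() helpers above, so it must not be called in atomic context.
 */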
2837 
2838 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2839         struct xhci_virt_device *vdev, int i)
2840 {
2841         struct xhci_virt_ep *ep = &vdev->eps[i];
2842 
2843         if (ep->ep_state & EP_HAS_STREAMS) {
2844                 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2845                                 xhci_get_endpoint_address(i));
2846                 xhci_free_stream_info(xhci, ep->stream_info);
2847                 ep->stream_info = NULL;
2848                 ep->ep_state &= ~EP_HAS_STREAMS;
2849         }
2850 }
2851 
2852 /* Called after one or more calls to xhci_add_endpoint() or
2853  * xhci_drop_endpoint().  If this call fails, the USB core is expected
2854  * to call xhci_reset_bandwidth().
2855  *
2856  * Since we are in the middle of changing either configuration or
2857  * installing a new alt setting, the USB core won't allow URBs to be
2858  * enqueued for any endpoint on the old config or interface.  Nothing
2859  * else should be touching the xhci->devs[slot_id] structure, so we
2860  * don't need to take the xhci->lock for manipulating that.
2861  */
2862 static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2863 {
2864         int i;
2865         int ret = 0;
2866         struct xhci_hcd *xhci;
2867         struct xhci_virt_device *virt_dev;
2868         struct xhci_input_control_ctx *ctrl_ctx;
2869         struct xhci_slot_ctx *slot_ctx;
2870         struct xhci_command *command;
2871 
2872         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2873         if (ret <= 0)
2874                 return ret;
2875         xhci = hcd_to_xhci(hcd);
2876         if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2877                 (xhci->xhc_state & XHCI_STATE_REMOVING))
2878                 return -ENODEV;
2879 
2880         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2881         virt_dev = xhci->devs[udev->slot_id];
2882 
2883         command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2884         if (!command)
2885                 return -ENOMEM;
2886 
2887         command->in_ctx = virt_dev->in_ctx;
2888 
2889         /* xHCI spec 4.6.6: A0 (slot) must be set; A1, D0 and D1 must be clear */
2890         ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2891         if (!ctrl_ctx) {
2892                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2893                                 __func__);
2894                 ret = -ENOMEM;
2895                 goto command_cleanup;
2896         }
2897         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2898         ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2899         ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2900 
2901         /* Don't issue the command if there's no endpoints to update */
2902         if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2903             ctrl_ctx->drop_flags == 0) {
2904                 ret = 0;
2905                 goto command_cleanup;
2906         }
2907         /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2908         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2909         for (i = 31; i >= 1; i--) {
2910                 __le32 le32 = cpu_to_le32(BIT(i));
2911 
2912                 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2913                     || (ctrl_ctx->add_flags & le32) || i == 1) {
2914                         slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2915                         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2916                         break;
2917                 }
2918         }
2919 
2920         ret = xhci_configure_endpoint(xhci, udev, command,
2921                         false, false);
2922         if (ret)
2923                 /* Callee should call reset_bandwidth() */
2924                 goto command_cleanup;
2925 
2926         /* Free any rings that were dropped, but not changed */
2927         for (i = 1; i < 31; i++) {
2928                 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2929                     !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2930                         xhci_free_endpoint_ring(xhci, virt_dev, i);
2931                         xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2932                 }
2933         }
2934         xhci_zero_in_ctx(xhci, virt_dev);
2935         /*
2936          * Install any rings for completely new endpoints or changed endpoints,
2937          * and free any old rings from changed endpoints.
2938          */
2939         for (i = 1; i < 31; i++) {
2940                 if (!virt_dev->eps[i].new_ring)
2941                         continue;
2942                 /* Only free the old ring if it exists.
2943                  * It may not if this is the first add of an endpoint.
2944                  */
2945                 if (virt_dev->eps[i].ring) {
2946                         xhci_free_endpoint_ring(xhci, virt_dev, i);
2947                 }
2948                 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2949                 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2950                 virt_dev->eps[i].new_ring = NULL;
2951         }
2952 command_cleanup:
2953         kfree(command->completion);
2954         kfree(command);
2955 
2956         return ret;
2957 }
2958 
2959 static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2960 {
2961         struct xhci_hcd *xhci;
2962         struct xhci_virt_device *virt_dev;
2963         int i, ret;
2964 
2965         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2966         if (ret <= 0)
2967                 return;
2968         xhci = hcd_to_xhci(hcd);
2969 
2970         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2971         virt_dev = xhci->devs[udev->slot_id];
2972         /* Free any rings allocated for added endpoints */
2973         for (i = 0; i < 31; i++) {
2974                 if (virt_dev->eps[i].new_ring) {
2975                         xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2976                         xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2977                         virt_dev->eps[i].new_ring = NULL;
2978                 }
2979         }
2980         xhci_zero_in_ctx(xhci, virt_dev);
2981 }
2982 
2983 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2984                 struct xhci_container_ctx *in_ctx,
2985                 struct xhci_container_ctx *out_ctx,
2986                 struct xhci_input_control_ctx *ctrl_ctx,
2987                 u32 add_flags, u32 drop_flags)
2988 {
2989         ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2990         ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2991         xhci_slot_copy(xhci, in_ctx, out_ctx);
2992         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2993 }
2994 
2995 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2996                 unsigned int slot_id, unsigned int ep_index,
2997                 struct xhci_dequeue_state *deq_state)
2998 {
2999         struct xhci_input_control_ctx *ctrl_ctx;
3000         struct xhci_container_ctx *in_ctx;
3001         struct xhci_ep_ctx *ep_ctx;
3002         u32 added_ctxs;
3003         dma_addr_t addr;
3004 
3005         in_ctx = xhci->devs[slot_id]->in_ctx;
3006         ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
3007         if (!ctrl_ctx) {
3008                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3009                                 __func__);
3010                 return;
3011         }
3012 
3013         xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3014                         xhci->devs[slot_id]->out_ctx, ep_index);
3015         ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3016         addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3017                         deq_state->new_deq_ptr);
3018         if (addr == 0) {
3019                 xhci_warn(xhci, "WARN Cannot submit config ep after "
3020                                 "reset ep command\n");
3021                 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3022                                 deq_state->new_deq_seg,
3023                                 deq_state->new_deq_ptr);
3024                 return;
3025         }
3026         ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
3027 
3028         added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
3029         xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3030                         xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3031                         added_ctxs, added_ctxs);
3032 }
3033 
3034 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
3035                                unsigned int ep_index, unsigned int stream_id,
3036                                struct xhci_td *td)
3037 {
3038         struct xhci_dequeue_state deq_state;
3039 
3040         xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3041                         "Cleaning up stalled endpoint ring");
3042         /* We need to move the HW's dequeue pointer past this TD,
3043          * or it will attempt to resend it on the next doorbell ring.
3044          */
3045         xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
3046                                     &deq_state);
3047 
3048         if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
3049                 return;
3050 
3051         /* HW with the reset endpoint quirk will use the saved dequeue state to
3052          * issue a configure endpoint command later.
3053          */
3054         if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3055                 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3056                                 "Queueing new dequeue state");
3057                 xhci_queue_new_dequeue_state(xhci, slot_id,
3058                                 ep_index, &deq_state);
3059         } else {
3060                 /* Better hope no one uses the input context between now and the
3061                  * reset endpoint completion!
3062                  * XXX: No idea how this hardware will react when stream rings
3063                  * are enabled.
3064                  */
3065                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3066                                 "Setting up input context for "
3067                                 "configure endpoint command");
3068                 xhci_setup_input_ctx_for_quirk(xhci, slot_id,
3069                                 ep_index, &deq_state);
3070         }
3071 }
3072 
3073 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3074                                   struct usb_host_endpoint *host_ep)
3075 {
3076         struct xhci_hcd         *xhci;
3077         struct xhci_virt_device *vdev;
3078         struct xhci_virt_ep     *ep;
3079         struct usb_device       *udev;
3080         unsigned long           flags;
3081         unsigned int            ep_index;
3082 
3083         xhci = hcd_to_xhci(hcd);
3084 rescan:
3085         spin_lock_irqsave(&xhci->lock, flags);
3086 
3087         udev = (struct usb_device *)host_ep->hcpriv;
3088         if (!udev || !udev->slot_id)
3089                 goto done;
3090 
3091         vdev = xhci->devs[udev->slot_id];
3092         if (!vdev)
3093                 goto done;
3094 
3095         ep_index = xhci_get_endpoint_index(&host_ep->desc);
3096         ep = &vdev->eps[ep_index];
3097         if (!ep)
3098                 goto done;
3099 
3100         /* wait for hub_tt_work to finish clearing hub TT */
3101         if (ep->ep_state & EP_CLEARING_TT) {
3102                 spin_unlock_irqrestore(&xhci->lock, flags);
3103                 schedule_timeout_uninterruptible(1);
3104                 goto rescan;
3105         }
3106 
3107         if (ep->ep_state)
3108                 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3109                          ep->ep_state);
3110 done:
3111         host_ep->hcpriv = NULL;
3112         spin_unlock_irqrestore(&xhci->lock, flags);
3113 }
3114 
3115 /*
3116  * Called after usb core issues a clear halt control message.
3117  * The host side of the halt should already be cleared by a reset endpoint
3118  * command issued when the STALL event was received.
3119  *
3120  * The reset endpoint command may only be issued to endpoints in the halted
3121  * state. For software that wishes to reset the data toggle or sequence number
3122  * of an endpoint that isn't in the halted state, this function will issue a
3123  * configure endpoint command with the Drop and Add bits set for the target
3124  * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
3125  */
3126 
3127 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3128                 struct usb_host_endpoint *host_ep)
3129 {
3130         struct xhci_hcd *xhci;
3131         struct usb_device *udev;
3132         struct xhci_virt_device *vdev;
3133         struct xhci_virt_ep *ep;
3134         struct xhci_input_control_ctx *ctrl_ctx;
3135         struct xhci_command *stop_cmd, *cfg_cmd;
3136         unsigned int ep_index;
3137         unsigned long flags;
3138         u32 ep_flag;
3139         int err;
3140 
3141         xhci = hcd_to_xhci(hcd);
3142         if (!host_ep->hcpriv)
3143                 return;
3144         udev = (struct usb_device *) host_ep->hcpriv;
3145         vdev = xhci->devs[udev->slot_id];
3146 
3147         /*
3148          * vdev may be lost due to xHC restore error and re-initialization
3149          * during S3/S4 resume. A new vdev will be allocated later by
3150          * xhci_discover_or_reset_device()
3151          */
3152         if (!udev->slot_id || !vdev)
3153                 return;
3154         ep_index = xhci_get_endpoint_index(&host_ep->desc);
3155         ep = &vdev->eps[ep_index];
3156         if (!ep)
3157                 return;
3158 
3159         /* Bail out if toggle is already being cleared by an endpoint reset */
3160         if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3161                 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3162                 return;
3163         }
3164         /* Only interrupt and bulk ep's use data toggle, USB2 spec says so */
3165         if (usb_endpoint_xfer_control(&host_ep->desc) ||
3166             usb_endpoint_xfer_isoc(&host_ep->desc))
3167                 return;
3168 
3169         ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3170 
3171         if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3172                 return;
3173 
3174         stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3175         if (!stop_cmd)
3176                 return;
3177 
3178         cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3179         if (!cfg_cmd)
3180                 goto cleanup;
3181 
3182         spin_lock_irqsave(&xhci->lock, flags);
3183 
3184         /* block queuing new trbs and ringing ep doorbell */
3185         ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3186 
3187         /*
3188          * Make sure endpoint ring is empty before resetting the toggle/seq.
3189          * Driver is required to synchronously cancel all transfer requests.
3190          * Stop the endpoint to force xHC to update the dequeue pointer.
3191          */
3192 
3193         if (!list_empty(&ep->ring->td_list)) {
3194                 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3195                 spin_unlock_irqrestore(&xhci->lock, flags);
3196                 xhci_free_command(xhci, cfg_cmd);
3197                 goto cleanup;
3198         }
3199 
3200         err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3201                                         ep_index, 0);
3202         if (err < 0) {
3203                 spin_unlock_irqrestore(&xhci->lock, flags);
3204                 xhci_free_command(xhci, cfg_cmd);
3205                 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
3206                                 __func__, err);
3207                 goto cleanup;
3208         }
3209 
3210         xhci_ring_cmd_db(xhci);
3211         spin_unlock_irqrestore(&xhci->lock, flags);
3212 
3213         wait_for_completion(stop_cmd->completion);
3214 
3215         spin_lock_irqsave(&xhci->lock, flags);
3216 
3217         /* config ep command clears toggle if add and drop ep flags are set */
3218         ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3219         xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3220                                            ctrl_ctx, ep_flag, ep_flag);
3221         xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3222 
3223         err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3224                                       udev->slot_id, false);
3225         if (err < 0) {
3226                 spin_unlock_irqrestore(&xhci->lock, flags);
3227                 xhci_free_command(xhci, cfg_cmd);
3228                 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
3229                                 __func__, err);
3230                 goto cleanup;
3231         }
3232 
3233         xhci_ring_cmd_db(xhci);
3234         spin_unlock_irqrestore(&xhci->lock, flags);
3235 
3236         wait_for_completion(cfg_cmd->completion);
3237 
3238         ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3239         xhci_free_command(xhci, cfg_cmd);
3240 cleanup:
3241         xhci_free_command(xhci, stop_cmd);
3242 }
3243 
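/* A summary of the checks below: the endpoint must exist, its SuperSpeed
 * companion descriptor must advertise stream support, it must not already
 * have (or be in the middle of getting) streams, and no URBs may be pending
 * on its ring.
 */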
3244 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3245                 struct usb_device *udev, struct usb_host_endpoint *ep,
3246                 unsigned int slot_id)
3247 {
3248         int ret;
3249         unsigned int ep_index;
3250         unsigned int ep_state;
3251 
3252         if (!ep)
3253                 return -EINVAL;
3254         ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3255         if (ret <= 0)
3256                 return -EINVAL;
3257         if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3258                 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3259                                 " descriptor for ep 0x%x does not support streams\n",
3260                                 ep->desc.bEndpointAddress);
3261                 return -EINVAL;
3262         }
3263 
3264         ep_index = xhci_get_endpoint_index(&ep->desc);
3265         ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3266         if (ep_state & EP_HAS_STREAMS ||
3267                         ep_state & EP_GETTING_STREAMS) {
3268                 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3269                                 "already has streams set up.\n",
3270                                 ep->desc.bEndpointAddress);
3271                 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3272                                 "dynamic stream context array reallocation.\n");
3273                 return -EINVAL;
3274         }
3275         if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3276                 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3277                                 "endpoint 0x%x; URBs are pending.\n",
3278                                 ep->desc.bEndpointAddress);
3279                 return -EINVAL;
3280         }
3281         return 0;
3282 }
3283 
3284 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3285                 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3286 {
3287         unsigned int max_streams;
3288 
3289         /* The stream context array size must be a power of two */
3290         *num_stream_ctxs = roundup_pow_of_two(*num_streams);
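        /* Illustrative example: a driver asking for 16 streams arrives here
         * with *num_streams == 17 (stream 0 included), so the context array
         * is rounded up to 32 entries.
         */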
3291         /*
3292          * Find out how many primary stream array entries the host controller
3293          * supports.  Later we may use secondary stream arrays (similar to 2nd
3294          * level page entries), but that's an optional feature for xHCI host
3295          * controllers. xHCs must support at least 4 stream IDs.
3296          */
3297         max_streams = HCC_MAX_PSA(xhci->hcc_params);
3298         if (*num_stream_ctxs > max_streams) {
3299                 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3300                                 max_streams);
3301                 *num_stream_ctxs = max_streams;
3302                 *num_streams = max_streams;
3303         }
3304 }
3305 
3306 /* Returns an error code if one of the endpoints already has streams.
3307  * This does not change any data structures, it only checks and gathers
3308  * information.
3309  */
3310 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3311                 struct usb_device *udev,
3312                 struct usb_host_endpoint **eps, unsigned int num_eps,
3313                 unsigned int *num_streams, u32 *changed_ep_bitmask)
3314 {
3315         unsigned int max_streams;
3316         unsigned int endpoint_flag;
3317         int i;
3318         int ret;
3319 
3320         for (i = 0; i < num_eps; i++) {
3321                 ret = xhci_check_streams_endpoint(xhci, udev,
3322                                 eps[i], udev->slot_id);
3323                 if (ret < 0)
3324                         return ret;
3325 
3326                 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3327                 if (max_streams < (*num_streams - 1)) {
3328                         xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3329                                         eps[i]->desc.bEndpointAddress,
3330                                         max_streams);
3331                         *num_streams = max_streams+1;
3332                 }
3333 
3334                 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3335                 if (*changed_ep_bitmask & endpoint_flag)
3336                         return -EINVAL;
3337                 *changed_ep_bitmask |= endpoint_flag;
3338         }
3339         return 0;
3340 }
3341 
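/* Returns a bitmask of the endpoint flags for endpoints that currently have
 * streams and can have them torn down.  Returns 0 (with a warning) if any
 * endpoint is already in the middle of getting or losing streams, so the
 * caller bails out.
 */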
3342 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3343                 struct usb_device *udev,
3344                 struct usb_host_endpoint **eps, unsigned int num_eps)
3345 {
3346         u32 changed_ep_bitmask = 0;
3347         unsigned int slot_id;
3348         unsigned int ep_index;
3349         unsigned int ep_state;
3350         int i;
3351 
3352         slot_id = udev->slot_id;
3353         if (!xhci->devs[slot_id])
3354                 return 0;
3355 
3356         for (i = 0; i < num_eps; i++) {
3357                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3358                 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3359                 /* Are streams already being freed for the endpoint? */
3360                 if (ep_state & EP_GETTING_NO_STREAMS) {
3361                         xhci_warn(xhci, "WARN Can't disable streams for "
3362                                         "endpoint 0x%x, "
3363                                         "streams are being disabled already\n",
3364                                         eps[i]->desc.bEndpointAddress);
3365                         return 0;
3366                 }
3367                 /* Are there actually any streams to free? */
3368                 if (!(ep_state & EP_HAS_STREAMS) &&
3369                                 !(ep_state & EP_GETTING_STREAMS)) {
3370                         xhci_warn(xhci, "WARN Can't disable streams for "
3371                                         "endpoint 0x%x, "
3372                                         "streams are already disabled!\n",
3373                                         eps[i]->desc.bEndpointAddress);
3374                         xhci_warn(xhci, "WARN xhci_free_streams() called "
3375                                         "with non-streams endpoint\n");
3376                         return 0;
3377                 }
3378                 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3379         }
3380         return changed_ep_bitmask;
3381 }
3382 
3383 /*
3384  * The USB device drivers use this function (through the HCD interface in USB
3385  * core) to prepare a set of bulk endpoints to use streams.  Streams are used
3386  * to coordinate mass storage command queueing across multiple endpoints
3387  * (basically a stream ID == a task ID).
3388  *
3389  * Setting up streams involves allocating the same size stream context array
3390  * for each endpoint and issuing a configure endpoint command for all of them.
3391  *
3392  * Don't allow the call to succeed if one endpoint only supports one stream
3393  * (which means it doesn't support streams at all).
3394  *
3395  * Drivers may get fewer stream IDs than they asked for, if the host controller
3396  * hardware or endpoints claim they can't support the number of requested
3397  * stream IDs.
3398  */
3399 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3400                 struct usb_host_endpoint **eps, unsigned int num_eps,
3401                 unsigned int num_streams, gfp_t mem_flags)
3402 {
3403         int i, ret;
3404         struct xhci_hcd *xhci;
3405         struct xhci_virt_device *vdev;
3406         struct xhci_command *config_cmd;
3407         struct xhci_input_control_ctx *ctrl_ctx;
3408         unsigned int ep_index;
3409         unsigned int num_stream_ctxs;
3410         unsigned int max_packet;
3411         unsigned long flags;
3412         u32 changed_ep_bitmask = 0;
3413 
3414         if (!eps)
3415                 return -EINVAL;
3416 
3417         /* Add one to the number of streams requested to account for
3418          * stream 0 that is reserved for xHCI usage.
3419          */
3420         num_streams += 1;
3421         xhci = hcd_to_xhci(hcd);
3422         xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3423                         num_streams);
3424 
3425         /* MaxPSASize value 0 (2 streams) means streams are not supported */
3426         if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3427                         HCC_MAX_PSA(xhci->hcc_params) < 4) {
3428                 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3429                 return -ENOSYS;
3430         }
3431 
3432         config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3433         if (!config_cmd)
3434                 return -ENOMEM;
3435 
3436         ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3437         if (!ctrl_ctx) {
3438                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3439                                 __func__);
3440                 xhci_free_command(xhci, config_cmd);
3441                 return -ENOMEM;
3442         }
3443 
3444         /* Check to make sure all endpoints are not already configured for
3445          * streams.  While we're at it, find the maximum number of streams that
3446          * all the endpoints will support and check for duplicate endpoints.
3447          */
3448         spin_lock_irqsave(&xhci->lock, flags);
3449         ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3450                         num_eps, &num_streams, &changed_ep_bitmask);
3451         if (ret < 0) {
3452                 xhci_free_command(xhci, config_cmd);
3453                 spin_unlock_irqrestore(&xhci->lock, flags);
3454                 return ret;
3455         }
3456         if (num_streams <= 1) {
3457                 xhci_warn(xhci, "WARN: endpoints can't handle "
3458                                 "more than one stream.\n");
3459                 xhci_free_command(xhci, config_cmd);
3460                 spin_unlock_irqrestore(&xhci->lock, flags);
3461                 return -EINVAL;
3462         }
3463         vdev = xhci->devs[udev->slot_id];
3464         /* Mark each endpoint as being in transition, so
3465          * xhci_urb_enqueue() will reject all URBs.
3466          */
3467         for (i = 0; i < num_eps; i++) {
3468                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3469                 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3470         }
3471         spin_unlock_irqrestore(&xhci->lock, flags);
3472 
3473         /* Setup internal data structures and allocate HW data structures for
3474          * streams (but don't install the HW structures in the input context
3475          * until we're sure all memory allocation succeeded).
3476          */
3477         xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3478         xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3479                         num_stream_ctxs, num_streams);
3480 
3481         for (i = 0; i < num_eps; i++) {
3482                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3483                 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3484                 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3485                                 num_stream_ctxs,
3486                                 num_streams,
3487                                 max_packet, mem_flags);
3488                 if (!vdev->eps[ep_index].stream_info)
3489                         goto cleanup;
3490                 /* Set maxPstreams in the endpoint input context and update the
3491                  * dequeue pointer to point to the stream context array.
3492                  */
3493         }
3494 
3495         /* Set up the input context for a configure endpoint command. */
3496         for (i = 0; i < num_eps; i++) {
3497                 struct xhci_ep_ctx *ep_ctx;
3498 
3499                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3500                 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3501 
3502                 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3503                                 vdev->out_ctx, ep_index);
3504                 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3505                                 vdev->eps[ep_index].stream_info);
3506         }
3507         /* Tell the HW to drop its old copy of the endpoint context info
3508          * and add the updated copy from the input context.
3509          */
3510         xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3511                         vdev->out_ctx, ctrl_ctx,
3512                         changed_ep_bitmask, changed_ep_bitmask);
3513 
3514         /* Issue and wait for the configure endpoint command */
3515         ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3516                         false, false);
3517 
3518         /* xHC rejected the configure endpoint command for some reason, so we
3519          * leave the old ring intact and free our internal streams data
3520          * structure.
3521          */
3522         if (ret < 0)
3523                 goto cleanup;
3524 
3525         spin_lock_irqsave(&xhci->lock, flags);
3526         for (i = 0; i < num_eps; i++) {
3527                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3528                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3529                 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3530                          udev->slot_id, ep_index);
3531                 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3532         }
3533         xhci_free_command(xhci, config_cmd);
3534         spin_unlock_irqrestore(&xhci->lock, flags);
3535 
3536         /* Subtract 1 for stream 0, which drivers can't use */
3537         return num_streams - 1;
3538 
3539 cleanup:
3540         /* If it didn't work, free the streams! */
3541         for (i = 0; i < num_eps; i++) {
3542                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3543                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3544                 vdev->eps[ep_index].stream_info = NULL;
3545                 /* Unset maxPstreams in the endpoint context and update the
3546                  * dequeue pointer to point to the normal ring.
3547                  */
3548                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3549                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3550                 xhci_endpoint_zero(xhci, vdev, eps[i]);
3551         }
3552         xhci_free_command(xhci, config_cmd);
3553         return -ENOMEM;
3554 }
3555 
3556 /* Transition the endpoint from using streams to being a "normal" endpoint
3557  * without streams.
3558  *
3559  * Modify the endpoint context state, submit a configure endpoint command,
3560  * and free all endpoint rings for streams if that completes successfully.
3561  */
3562 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3563                 struct usb_host_endpoint **eps, unsigned int num_eps,
3564                 gfp_t mem_flags)
3565 {
3566         int i, ret;
3567         struct xhci_hcd *xhci;
3568         struct xhci_virt_device *vdev;
3569         struct xhci_command *command;
3570         struct xhci_input_control_ctx *ctrl_ctx;
3571         unsigned int ep_index;
3572         unsigned long flags;
3573         u32 changed_ep_bitmask;
3574 
3575         xhci = hcd_to_xhci(hcd);
3576         vdev = xhci->devs[udev->slot_id];
3577 
3578         /* Set up a configure endpoint command to remove the streams rings */
3579         spin_lock_irqsave(&xhci->lock, flags);
3580         changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3581                         udev, eps, num_eps);
3582         if (changed_ep_bitmask == 0) {
3583                 spin_unlock_irqrestore(&xhci->lock, flags);
3584                 return -EINVAL;
3585         }
3586 
3587         /* Use the xhci_command structure from the first endpoint.  We may have
3588          * allocated too many, but the driver may call xhci_free_streams() for
3589          * each endpoint it grouped into one call to xhci_alloc_streams().
3590          */
3591         ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3592         command = vdev->eps[ep_index].stream_info->free_streams_command;
3593         ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3594         if (!ctrl_ctx) {
3595                 spin_unlock_irqrestore(&xhci->lock, flags);
3596                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3597                                 __func__);
3598                 return -EINVAL;
3599         }
3600 
3601         for (i = 0; i < num_eps; i++) {
3602                 struct xhci_ep_ctx *ep_ctx;
3603 
3604                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3605                 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3606                 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3607                         EP_GETTING_NO_STREAMS;
3608 
3609                 xhci_endpoint_copy(xhci, command->in_ctx,
3610                                 vdev->out_ctx, ep_index);
3611                 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3612                                 &vdev->eps[ep_index]);
3613         }
3614         xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3615                         vdev->out_ctx, ctrl_ctx,
3616                         changed_ep_bitmask, changed_ep_bitmask);
3617         spin_unlock_irqrestore(&xhci->lock, flags);
3618 
3619         /* Issue and wait for the configure endpoint command,
3620          * which must succeed.
3621          */
3622         ret = xhci_configure_endpoint(xhci, udev, command,
3623                         false, true);
3624 
3625         /* xHC rejected the configure endpoint command for some reason, so we
3626          * leave the streams rings intact.
3627          */
3628         if (ret < 0)
3629                 return ret;
3630 
3631         spin_lock_irqsave(&xhci->lock, flags);
3632         for (i = 0; i < num_eps; i++) {
3633                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3634                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3635                 vdev->eps[ep_index].stream_info = NULL;
3636                 /* Unset maxPstreams in the endpoint context and update the
3637                  * dequeue pointer to point to the normal ring.
3638                  */
3639                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3640                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3641         }
3642         spin_unlock_irqrestore(&xhci->lock, flags);
3643 
3644         return 0;
3645 }
3646 
3647 /*
3648  * Deletes endpoint resources for endpoints that were active before a Reset
3649  * Device command, or a Disable Slot command.  The Reset Device command leaves
3650  * the control endpoint intact, whereas the Disable Slot command deletes it.
3651  *
3652  * Must be called with xhci->lock held.
3653  */
3654 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3655         struct xhci_virt_device *virt_dev, bool drop_control_ep)
3656 {
3657         int i;
3658         unsigned int num_dropped_eps = 0;
3659         unsigned int drop_flags = 0;
3660 
3661         for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3662                 if (virt_dev->eps[i].ring) {
3663                         drop_flags |= 1 << i;
3664                         num_dropped_eps++;
3665                 }
3666         }
3667         xhci->num_active_eps -= num_dropped_eps;
3668         if (num_dropped_eps)
3669                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3670                                 "Dropped %u ep ctxs, flags = 0x%x, "
3671                                 "%u now active.",
3672                                 num_dropped_eps, drop_flags,
3673                                 xhci->num_active_eps);
3674 }
3675 
3676 /*
3677  * This submits a Reset Device Command, which will set the device state to 0,
3678  * set the device address to 0, and disable all the endpoints except the
3679  * default control endpoint.  The USB core should come back and call
3680  * xhci_address_device(), and then re-set up the configuration.  If this is
3681  * called because of a usb_reset_and_verify_device(), then the old alternate
3682  * settings will be re-installed through the normal bandwidth allocation
3683  * functions.
3684  *
3685  * Wait for the Reset Device command to finish.  Remove all structures
3686  * associated with the endpoints that were disabled.
3687  *
3688  * If the virt_dev to be reset does not exist or does not match the udev,
3689  * it means the device is lost, possibly due to the xHC restore error and
3690  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3691  * re-allocate the device.
3692  */
3693 
3694 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3695                 struct usb_device *udev)
3696 {
3697         int ret, i;
3698         unsigned long flags;
3699         struct xhci_hcd *xhci;
3700         unsigned int slot_id;
3701         struct xhci_virt_device *virt_dev;
3702         struct xhci_command *reset_device_cmd;
3703         struct xhci_slot_ctx *slot_ctx;
3704         int old_active_eps = 0;
3705 
3706         ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3707         if (ret <= 0)
3708                 return ret;
3709         xhci = hcd_to_xhci(hcd);
3710         slot_id = udev->slot_id;
3711         virt_dev = xhci->devs[slot_id];
3712         if (!virt_dev) {
3713                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3714                                 "not exist. Re-allocate the device\n", slot_id);
3715                 ret = xhci_alloc_dev(hcd, udev);
3716                 if (ret == 1)
3717                         return 0;
3718                 else
3719                         return -EINVAL;
3720         }
3721 
3722         if (virt_dev->tt_info)
3723                 old_active_eps = virt_dev->tt_info->active_eps;
3724 
3725         if (virt_dev->udev != udev) {
3726                 /* If the virt_dev and the udev do not match, this virt_dev
3727                  * may belong to another udev.
3728                  * Re-allocate the device.
3729                  */
3730                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3731                                 "not match the udev. Re-allocate the device\n",
3732                                 slot_id);
3733                 ret = xhci_alloc_dev(hcd, udev);
3734                 if (ret == 1)
3735                         return 0;
3736                 else
3737                         return -EINVAL;
3738         }
3739 
3740         /* If the device is not set up, there is no point in resetting it */
3741         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3742         if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3743                                                 SLOT_STATE_DISABLED)
3744                 return 0;
3745 
3746         trace_xhci_discover_or_reset_device(slot_ctx);
3747 
3748         xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3749         /* Allocate the command structure that holds the struct completion.
3750          * Assume we're in process context, since the normal device reset
3751          * process has to wait for the device anyway.  Storage devices are
3752          * reset as part of error handling, so use GFP_NOIO instead of
3753          * GFP_KERNEL.
3754          */
3755         reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3756         if (!reset_device_cmd) {
3757                 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3758                 return -ENOMEM;
3759         }
3760 
3761         /* Attempt to submit the Reset Device command to the command ring */
3762         spin_lock_irqsave(&xhci->lock, flags);
3763 
3764         ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3765         if (ret) {
3766                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3767                 spin_unlock_irqrestore(&xhci->lock, flags);
3768                 goto command_cleanup;
3769         }
3770         xhci_ring_cmd_db(xhci);
3771         spin_unlock_irqrestore(&xhci->lock, flags);
3772 
3773         /* Wait for the Reset Device command to finish */
3774         wait_for_completion(reset_device_cmd->completion);
3775 
3776         /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3777          * unless we tried to reset a slot ID that wasn't enabled,
3778          * or the device wasn't in the addressed or configured state.
3779          */
3780         ret = reset_device_cmd->status;
3781         switch (ret) {
3782         case COMP_COMMAND_ABORTED:
3783         case COMP_COMMAND_RING_STOPPED:
3784                 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3785                 ret = -ETIME;
3786                 goto command_cleanup;
3787         case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3788         case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3789                 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3790                                 slot_id,
3791                                 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3792                 xhci_dbg(xhci, "Not freeing device rings.\n");
3793                 /* Don't treat this as an error. */
3794                 ret = 0;
3795                 goto command_cleanup;
3796         case COMP_SUCCESS:
3797                 xhci_dbg(xhci, "Successful reset device command.\n");
3798                 break;
3799         default:
3800                 if (xhci_is_vendor_info_code(xhci, ret))
3801                         break;
3802                 xhci_warn(xhci, "Unknown completion code %u for "
3803                                 "reset device command.\n", ret);
3804                 ret = -EINVAL;
3805                 goto command_cleanup;
3806         }
3807 
3808         /* Free up host controller endpoint resources */
3809         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3810                 spin_lock_irqsave(&xhci->lock, flags);
3811                 /* Don't delete the default control endpoint resources */
3812                 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3813                 spin_unlock_irqrestore(&xhci->lock, flags);
3814         }
3815 
3816         /* Everything but endpoint 0 is disabled, so free the rings. */
3817         for (i = 1; i < 31; i++) {
3818                 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3819 
3820                 if (ep->ep_state & EP_HAS_STREAMS) {
3821                         xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3822                                         xhci_get_endpoint_address(i));
3823                         xhci_free_stream_info(xhci, ep->stream_info);
3824                         ep->stream_info = NULL;
3825                         ep->ep_state &= ~EP_HAS_STREAMS;
3826                 }
3827 
3828                 if (ep->ring) {
3829                         xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3830                         xhci_free_endpoint_ring(xhci, virt_dev, i);
3831                 }
3832                 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3833                         xhci_drop_ep_from_interval_table(xhci,
3834                                         &virt_dev->eps[i].bw_info,
3835                                         virt_dev->bw_table,
3836                                         udev,
3837                                         &virt_dev->eps[i],
3838                                         virt_dev->tt_info);
3839                 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3840         }
3841         /* If necessary, update the number of active TTs on this root port */
3842         xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3843         virt_dev->flags = 0;
3844         ret = 0;
3845 
3846 command_cleanup:
3847         xhci_free_command(xhci, reset_device_cmd);
3848         return ret;
3849 }
3850 
3851 /*
3852  * At this point, the struct usb_device is about to go away, the device has
3853  * disconnected, and all traffic has been stopped and the endpoints have been
3854  * disabled.  Free any HC data structures associated with that device.
3855  */
3856 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3857 {
3858         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3859         struct xhci_virt_device *virt_dev;
3860         struct xhci_slot_ctx *slot_ctx;
3861         int i, ret;
3862 
3863 #ifndef CONFIG_USB_DEFAULT_PERSIST
3864         /*
3865          * We called pm_runtime_get_noresume when the device was attached.
3866          * Decrement the counter here to allow the controller to runtime suspend
3867          * if no devices remain.
3868          */
3869         if (xhci->quirks & XHCI_RESET_ON_RESUME)
3870                 pm_runtime_put_noidle(hcd->self.controller);
3871 #endif
3872 
3873         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3874         /* If the host is halted due to driver unload, we still need to free the
3875          * device.
3876          */
3877         if (ret <= 0 && ret != -ENODEV)
3878                 return;
3879 
3880         virt_dev = xhci->devs[udev->slot_id];
3881         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3882         trace_xhci_free_dev(slot_ctx);
3883 
3884         /* Stop any wayward timer functions (which may grab the lock) */
3885         for (i = 0; i < 31; i++) {
3886                 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3887                 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3888         }
3889         virt_dev->udev = NULL;
3890         ret = xhci_disable_slot(xhci, udev->slot_id);
3891         if (ret)
3892                 xhci_free_virt_device(xhci, udev->slot_id);
3893 }
3894 
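/* Issue a Disable Slot command and ring the doorbell.  The command completion
 * handler is expected to free the virt device; callers free it themselves
 * only when queueing the command fails (see the call sites above and below).
 * Fails fast with -ENODEV if the host is dead, dying or halted.
 */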
3895 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3896 {
3897         struct xhci_command *command;
3898         unsigned long flags;
3899         u32 state;
3900         int ret = 0;
3901 
3902         command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3903         if (!command)
3904                 return -ENOMEM;
3905 
3906         xhci_debugfs_remove_slot(xhci, slot_id);
3907 
3908         spin_lock_irqsave(&xhci->lock, flags);
3909         /* Don't disable the slot if the host controller is dead. */
3910         state = readl(&xhci->op_regs->status);
3911         if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3912                         (xhci->xhc_state & XHCI_STATE_HALTED)) {
3913                 spin_unlock_irqrestore(&xhci->lock, flags);
3914                 kfree(command);
3915                 return -ENODEV;
3916         }
3917 
3918         ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3919                                 slot_id);
3920         if (ret) {
3921                 spin_unlock_irqrestore(&xhci->lock, flags);
3922                 kfree(command);
3923                 return ret;
3924         }
3925         xhci_ring_cmd_db(xhci);
3926         spin_unlock_irqrestore(&xhci->lock, flags);
3927         return ret;
3928 }
3929 
3930 /*
3931  * Checks if we have enough host controller resources for the default control
3932  * endpoint.
3933  *
3934  * Must be called with xhci->lock held.
3935  */
3936 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3937 {
3938         if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3939                 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3940                                 "Not enough ep ctxs: "
3941                                 "%u active, need to add 1, limit is %u.",
3942                                 xhci->num_active_eps, xhci->limit_active_eps);
3943                 return -ENOMEM;
3944         }
3945         xhci->num_active_eps += 1;
3946         xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3947                         "Adding 1 ep ctx, %u now active.",
3948                         xhci->num_active_eps);
3949         return 0;
3950 }
3951 
3952 
3953 /*
3954  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3955  * timed out, or allocating memory failed.  Returns 1 on success.
3956  */
3957 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3958 {
3959         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3960         struct xhci_virt_device *vdev;
3961         struct xhci_slot_ctx *slot_ctx;
3962         unsigned long flags;
3963         int ret, slot_id;
3964         struct xhci_command *command;
3965 
3966         command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3967         if (!command)
3968                 return 0;
3969 
3970         spin_lock_irqsave(&xhci->lock, flags);
3971         ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3972         if (ret) {
3973                 spin_unlock_irqrestore(&xhci->lock, flags);
3974                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3975                 xhci_free_command(xhci, command);
3976                 return 0;
3977         }
3978         xhci_ring_cmd_db(xhci);
3979         spin_unlock_irqrestore(&xhci->lock, flags);
3980 
3981         wait_for_completion(command->completion);
3982         slot_id = command->slot_id;
3983 
3984         if (!slot_id || command->status != COMP_SUCCESS) {
3985                 xhci_err(xhci, "Error while assigning device slot ID\n");
3986                 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3987                                 HCS_MAX_SLOTS(
3988                                         readl(&xhci->cap_regs->hcs_params1)));
3989                 xhci_free_command(xhci, command);
3990                 return 0;
3991         }
3992 
3993         xhci_free_command(xhci, command);
3994 
3995         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3996                 spin_lock_irqsave(&xhci->lock, flags);
3997                 ret = xhci_reserve_host_control_ep_resources(xhci);
3998                 if (ret) {
3999                         spin_unlock_irqrestore(&xhci->lock, flags);
4000                         xhci_warn(xhci, "Not enough host resources, "
4001                                         "active endpoint contexts = %u\n",
4002                                         xhci->num_active_eps);
4003                         goto disable_slot;
4004                 }
4005                 spin_unlock_irqrestore(&xhci->lock, flags);
4006         }
4007         /* Use GFP_NOIO, since this function can be called from
4008          * xhci_discover_or_reset_device(), which may be called as part of
4009          * mass storage driver error handling.
4010          */
4011         if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4012                 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4013                 goto disable_slot;
4014         }
4015         vdev = xhci->devs[slot_id];
4016         slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4017         trace_xhci_alloc_dev(slot_ctx);
4018 
4019         udev->slot_id = slot_id;
4020 
4021         xhci_debugfs_create_slot(xhci, slot_id);
4022 
4023 #ifndef CONFIG_USB_DEFAULT_PERSIST
4024         /*
4025          * If resetting upon resume, we can't put the controller into runtime
4026          * suspend if there is a device attached.
4027          */
4028         if (xhci->quirks & XHCI_RESET_ON_RESUME)
4029                 pm_runtime_get_noresume(hcd->self.controller);
4030 #endif
4031 
4032         /* Is this a LS or FS device under a HS hub? */
4033         /* Hub or peripheral? */
4034         return 1;
4035 
4036 disable_slot:
4037         ret = xhci_disable_slot(xhci, udev->slot_id);
4038         if (ret)
4039                 xhci_free_virt_device(xhci, udev->slot_id);
4040 
4041         return 0;
4042 }
4043 
4044 /*
4045  * Issue an Address Device command and optionally send a corresponding
4046  * SetAddress request to the device.
4047  */
4048 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4049                              enum xhci_setup_dev setup)
4050 {
4051         const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4052         unsigned long flags;
4053         struct xhci_virt_device *virt_dev;
4054         int ret = 0;
4055         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4056         struct xhci_slot_ctx *slot_ctx;
4057         struct xhci_input_control_ctx *ctrl_ctx;
4058         u64 temp_64;
4059         struct xhci_command *command = NULL;
4060 
4061         mutex_lock(&xhci->mutex);
4062 
4063         if (xhci->xhc_state) {  /* dying, removing or halted */
4064                 ret = -ESHUTDOWN;
4065                 goto out;
4066         }
4067 
4068         if (!udev->slot_id) {
4069                 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4070                                 "Bad Slot ID %d", udev->slot_id);
4071                 ret = -EINVAL;
4072                 goto out;
4073         }
4074 
4075         virt_dev = xhci->devs[udev->slot_id];
4076 
4077         if (WARN_ON(!virt_dev)) {
4078                 /*
4079                  * In plug/unplug torture test with an NEC controller,
4080                  * a zero-dereference was observed once due to virt_dev = 0.
4081                  * Print useful debug rather than crash just in case.
4082                  */
4083                 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4084                         udev->slot_id);
4085                 ret = -EINVAL;
4086                 goto out;
4087         }
4088         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4089         trace_xhci_setup_device_slot(slot_ctx);
4090 
4091         if (setup == SETUP_CONTEXT_ONLY) {
4092                 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4093                     SLOT_STATE_DEFAULT) {
4094                         xhci_dbg(xhci, "Slot already in default state\n");
4095                         goto out;
4096                 }
4097         }
4098 
4099         command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4100         if (!command) {
4101                 ret = -ENOMEM;
4102                 goto out;
4103         }
4104 
4105         command->in_ctx = virt_dev->in_ctx;
4106 
4107         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4108         ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4109         if (!ctrl_ctx) {
4110                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4111                                 __func__);
4112                 ret = -EINVAL;
4113                 goto out;
4114         }
4115         /*
4116          * If this is the first Set Address since device plug-in or
4117          * virt_device reallocation after a resume with an xHCI power loss,
4118          * then set up the slot context.
4119          */
4120         if (!slot_ctx->dev_info)
4121                 xhci_setup_addressable_virt_dev(xhci, udev);
4122         /* Otherwise, update the control endpoint ring enqueue pointer. */
4123         else
4124                 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4125         ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4126         ctrl_ctx->drop_flags = 0;
4127 
4128         trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4129                                 le32_to_cpu(slot_ctx->dev_info) >> 27);
4130 
4131         trace_xhci_address_ctrl_ctx(ctrl_ctx);
4132         spin_lock_irqsave(&xhci->lock, flags);
4133         trace_xhci_setup_device(virt_dev);
4134         ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4135                                         udev->slot_id, setup);
4136         if (ret) {
4137                 spin_unlock_irqrestore(&xhci->lock, flags);
4138                 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4139                                 "FIXME: allocate a command ring segment");
4140                 goto out;
4141         }
4142         xhci_ring_cmd_db(xhci);
4143         spin_unlock_irqrestore(&xhci->lock, flags);
4144 
4145         /* A control transfer can take up to 5 sec to complete */
4146         wait_for_completion(command->completion);
4147 
4148         /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4149          * the SetAddress() "recovery interval" associated with the SetAddress
4150          * command."
4151          */
4152         switch (command->status) {
4153         case COMP_COMMAND_ABORTED:
4154         case COMP_COMMAND_RING_STOPPED:
4155                 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4156                 ret = -ETIME;
4157                 break;
4158         case COMP_CONTEXT_STATE_ERROR:
4159         case COMP_SLOT_NOT_ENABLED_ERROR:
4160                 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4161                          act, udev->slot_id);
4162                 ret = -EINVAL;
4163                 break;
4164         case COMP_USB_TRANSACTION_ERROR:
4165                 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4166 
4167                 mutex_unlock(&xhci->mutex);
4168                 ret = xhci_disable_slot(xhci, udev->slot_id);
4169                 if (!ret)
4170                         xhci_alloc_dev(hcd, udev);
4171                 kfree(command->completion);
4172                 kfree(command);
4173                 return -EPROTO;
4174         case COMP_INCOMPATIBLE_DEVICE_ERROR:
4175                 dev_warn(&udev->dev,
4176                          "ERROR: Incompatible device for setup %s command\n", act);
4177                 ret = -ENODEV;
4178                 break;
4179         case COMP_SUCCESS:
4180                 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4181                                "Successful setup %s command", act);
4182                 break;
4183         default:
4184                 xhci_err(xhci,
4185                          "ERROR: unexpected setup %s command completion code 0x%x.\n",
4186                          act, command->status);
4187                 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4188                 ret = -EINVAL;
4189                 break;
4190         }
4191         if (ret)
4192                 goto out;
4193         temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4194         xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4195                         "Op regs DCBAA ptr = %#016llx", temp_64);
4196         xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4197                 "Slot ID %d dcbaa entry @%p = %#016llx",
4198                 udev->slot_id,
4199                 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4200                 (unsigned long long)
4201                 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4202         xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4203                         "Output Context DMA address = %#08llx",
4204                         (unsigned long long)virt_dev->out_ctx->dma);
4205         trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4206                                 le32_to_cpu(slot_ctx->dev_info) >> 27);
4207         /*
4208          * USB core uses address 1 for the roothubs, so we add one to the
4209          * address given back to us by the HC.
4210          */
4211         trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4212                                 le32_to_cpu(slot_ctx->dev_info) >> 27);
4213         /* Zero the input context control for later use */
4214         ctrl_ctx->add_flags = 0;
4215         ctrl_ctx->drop_flags = 0;
4216         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4217         udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4218 
4219         xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4220                        "Internal device address = %d",
4221                        le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4222 out:
4223         mutex_unlock(&xhci->mutex);
4224         if (command) {
4225                 kfree(command->completion);
4226                 kfree(command);
4227         }
4228         return ret;
4229 }
4230 
4231 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4232 {
4233         return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4234 }
4235 
4236 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4237 {
4238         return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4239 }
4240 
4241 /*
4242  * Translate the logical port index into the raw (1-based) port number used
4243  * by the HW port status registers: calculate the offset between the port's
4244  * PORTSC register and the port status base, then divide by the number of
4245  * per-port registers to get the real index.
4246  */
4247 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4248 {
4249         struct xhci_hub *rhub;
4250 
4251         rhub = xhci_get_rhub(hcd);
4252         return rhub->ports[port1 - 1]->hw_portnum + 1;
4253 }
4254 
4255 /*
4256  * Update the Max Exit Latency (MEL) in the slot context with an Evaluate
4257  * Context command, so the xHC can account for U1/U2 wakeup time.
4258  */
4259 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4260                         struct usb_device *udev, u16 max_exit_latency)
4261 {
4262         struct xhci_virt_device *virt_dev;
4263         struct xhci_command *command;
4264         struct xhci_input_control_ctx *ctrl_ctx;
4265         struct xhci_slot_ctx *slot_ctx;
4266         unsigned long flags;
4267         int ret;
4268 
4269         spin_lock_irqsave(&xhci->lock, flags);
4270 
4271         virt_dev = xhci->devs[udev->slot_id];
4272 
4273         /*
4274          * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4275          * xHC was re-initialized. Or it might never exist, if a device wasn't
4276          * plugged in yet.
4277          */
4278 
4279         if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4280                 spin_unlock_irqrestore(&xhci->lock, flags);
4281                 return 0;
4282         }
4283 
4284         /* Attempt to issue an Evaluate Context command to change the MEL. */
4285         command = xhci->lpm_command;
4286         ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4287         if (!ctrl_ctx) {
4288                 spin_unlock_irqrestore(&xhci->lock, flags);
4289                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4290                                 __func__);
4291                 return -ENOMEM;
4292         }
4293 
4294         xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4295         spin_unlock_irqrestore(&xhci->lock, flags);
4296 
4297         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4298         slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4299         slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4300         slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4301         slot_ctx->dev_state = 0;
4302 
4303         xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4304                         "Set up evaluate context for LPM MEL change.");
4305 
4306         /* Issue and wait for the evaluate context command. */
4307         ret = xhci_configure_endpoint(xhci, udev, command,
4308                         true, true);
4309 
4310         if (!ret) {
4311                 spin_lock_irqsave(&xhci->lock, flags);
4312                 virt_dev->current_mel = max_exit_latency;
4313                 spin_unlock_irqrestore(&xhci->lock, flags);
4314         }
4315         return ret;
4316 }
4317 
4318 #ifdef CONFIG_PM
4319 
4320 /* BESL to HIRD Encoding array for USB2 LPM */
4321 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4322         3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
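/* Illustrative example: with a host U2 exit latency (u2del) of 300 us, the
 * lookup below picks besl_host = 3, the first entry >= u2del; a device
 * without BESL support and the same u2del instead gets
 * (300 - 51) / 75 + 1 = 4.
 */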
4323 
4324 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4325 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4326                                         struct usb_device *udev)
4327 {
4328         int u2del, besl, besl_host;
4329         int besl_device = 0;
4330         u32 field;
4331 
4332         u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4333         field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4334 
4335         if (field & USB_BESL_SUPPORT) {
4336                 for (besl_host = 0; besl_host < 16; besl_host++) {
4337                         if (xhci_besl_encoding[besl_host] >= u2del)
4338                                 break;
4339                 }
4340                 /* Use baseline BESL value as default */
4341                 if (field & USB_BESL_BASELINE_VALID)
4342                         besl_device = USB_GET_BESL_BASELINE(field);
4343                 else if (field & USB_BESL_DEEP_VALID)
4344                         besl_device = USB_GET_BESL_DEEP(field);
4345         } else {
4346                 if (u2del <= 50)
4347                         besl_host = 0;
4348                 else
4349                         besl_host = (u2del - 51) / 75 + 1;
4350         }
4351 
4352         besl = besl_host + besl_device;
4353         if (besl > 15)
4354                 besl = 15;
4355 
4356         return besl;
4357 }
4358 
4359 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4360 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4361 {
4362         u32 field;
4363         int l1;
4364         int besld = 0;
4365         int hirdm = 0;
4366 
4367         field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4368 
4369         /* xHCI L1 timeout field is in 256us units */
4370         l1 = udev->l1_params.timeout / 256;
4371 
4372         /* device has a preferred BESLD */
4373         if (field & USB_BESL_DEEP_VALID) {
4374                 besld = USB_GET_BESL_DEEP(field);
4375                 hirdm = 1;
4376         }
4377 
4378         return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4379 }
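/* Worked example for the helper above (values are hypothetical): a device
 * reporting an L1 timeout of, say, 512 us and a valid deep BESL yields
 * PORT_L1_TIMEOUT(2) (512 / 256) with HIRDM set, plus the device's
 * preferred BESLD.
 */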
4380 
4381 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4382                         struct usb_device *udev, int enable)
4383 {
4384         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4385         struct xhci_port **ports;
4386         __le32 __iomem  *pm_addr, *hlpm_addr;
4387         u32             pm_val, hlpm_val, field;
4388         unsigned int    port_num;
4389         unsigned long   flags;
4390         int             hird, exit_latency;
4391         int             ret;
4392 
4393         if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4394                         !udev->lpm_capable)
4395                 return -EPERM;
4396 
4397         if (!udev->parent || udev->parent->parent ||
4398                         udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4399                 return -EPERM;
4400 
4401         if (udev->usb2_hw_lpm_capable != 1)
4402                 return -EPERM;
4403 
4404         spin_lock_irqsave(&xhci->lock, flags);
4405 
4406         ports = xhci->usb2_rhub.ports;
4407         port_num = udev->portnum - 1;
4408         pm_addr = ports[port_num]->addr + PORTPMSC;
4409         pm_val = readl(pm_addr);
4410         hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4411 
4412         xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4413                         enable ? "enable" : "disable", port_num + 1);
4414 
4415         if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
4416                 /* Host supports BESL timeout instead of HIRD */
4417                 if (udev->usb2_hw_lpm_besl_capable) {
4418                         /* if device doesn't have a preferred BESL value use a
4419                          * default one which works with mixed HIRD and BESL
4420                          * support
4421                          */
4422                         field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4423                         if ((field & USB_BESL_SUPPORT) &&
4424                             (field & USB_BESL_BASELINE_VALID))
4425                                 hird = USB_GET_BESL_BASELINE(field);
4426                         else
4427                                 hird = udev->l1_params.besl;
4428 
4429                         exit_latency = xhci_besl_encoding[hird];
4430                         spin_unlock_irqrestore(&xhci->lock, flags);
4431 
4432                         /* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
4433                          * input context for link power management evaluate
4434                          * context commands. It is protected by hcd->bandwidth
4435                          * mutex and is shared by all devices. We need to set
4436                          * the max ext latency in USB 2 BESL LPM as well, so
4437                          * use the same mutex and xhci_change_max_exit_latency()
4438                          */
4439                         mutex_lock(hcd->bandwidth_mutex);
4440                         ret = xhci_change_max_exit_latency(xhci, udev,
4441                                                            exit_latency);
4442                         mutex_unlock(hcd->bandwidth_mutex);
4443 
4444                         if (ret < 0)
4445                                 return ret;
4446                         spin_lock_irqsave(&xhci->lock, flags);
4447 
4448                         hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4449                         writel(hlpm_val, hlpm_addr);
4450                         /* flush write */
4451                         readl(hlpm_addr);
4452                 } else {
4453                         hird = xhci_calculate_hird_besl(xhci, udev);
4454                 }
4455 
4456                 pm_val &= ~PORT_HIRD_MASK;
4457                 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4458                 writel(pm_val, pm_addr);
4459                 pm_val = readl(pm_addr);
4460                 pm_val |= PORT_HLE;
4461                 writel(pm_val, pm_addr);
4462                 /* flush write */
4463                 readl(pm_addr);
4464         } else {
4465                 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4466                 writel(pm_val, pm_addr);
4467                 /* flush write */
4468                 readl(pm_addr);
4469                 if (udev->usb2_hw_lpm_besl_capable) {
4470                         spin_unlock_irqrestore(&xhci->lock, flags);
4471                         mutex_lock(hcd->bandwidth_mutex);
4472                         xhci_change_max_exit_latency(xhci, udev, 0);
4473                         mutex_unlock(hcd->bandwidth_mutex);
4474                         return 0;
4475                 }
4476         }
4477 
4478         spin_unlock_irqrestore(&xhci->lock, flags);
4479         return 0;
4480 }
4481 
4482 /* Check if a USB2 port supports a given extended capability protocol.
4483  * Only USB2 ports' extended protocol capability values are cached.
4484  * Return 1 if the capability is supported.
4485  */
4486 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4487                                            unsigned capability)
4488 {
4489         u32 port_offset, port_count;
4490         int i;
4491 
4492         for (i = 0; i < xhci->num_ext_caps; i++) {
4493                 if (xhci->ext_caps[i] & capability) {
4494                         /* port offsets start at 1 */
4495                         port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4496                         port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4497                         if (port >= port_offset &&
4498                             port < port_offset + port_count)
4499                                 return 1;
4500                 }
4501         }
4502         return 0;
4503 }
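/* Example: an extended capability declaring port offset 1 and port count 4
 * covers zero-based ports 0-3, so a device on root port 2 (portnum 3) passes
 * the range check above.
 */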
4504 
4505 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4506 {
4507         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4508         int             portnum = udev->portnum - 1;
4509 
4510         if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4511                 return 0;
4512 
4513         /* We only support LPM for non-hub devices connected to the root hub */
4514         if (!udev->parent || udev->parent->parent ||
4515                         udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4516                 return 0;
4517 
4518         if (xhci->hw_lpm_support == 1 &&
4519                         xhci_check_usb2_port_capability(
4520                                 xhci, portnum, XHCI_HLC)) {
4521                 udev->usb2_hw_lpm_capable = 1;
4522                 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4523                 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4524                 if (xhci_check_usb2_port_capability(xhci, portnum,
4525                                         XHCI_BLC))
4526                         udev->usb2_hw_lpm_besl_capable = 1;
4527         }
4528 
4529         return 0;
4530 }
4531 
4532 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4533 
4534 /* Service interval in nanoseconds calculated from endpoint descriptor */
4535 static unsigned long long xhci_service_interval_to_ns(
4536                 struct usb_endpoint_descriptor *desc)
4537 {
4538         return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4539 }
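/* For instance, bInterval = 4 gives (1 << 3) * 125 us = 1 ms, i.e. the
 * helper above returns 1000000 ns.
 */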
4540 
4541 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4542                 enum usb3_link_state state)
4543 {
4544         unsigned long long sel;
4545         unsigned long long pel;
4546         unsigned int max_sel_pel;
4547         char *state_name;
4548 
4549         switch (state) {
4550         case USB3_LPM_U1:
4551                 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4552                 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4553                 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4554                 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4555                 state_name = "U1";
4556                 break;
4557         case USB3_LPM_U2:
4558                 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4559                 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4560                 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4561                 state_name = "U2";
4562                 break;
4563         default:
4564                 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4565                                 __func__);
4566                 return USB3_LPM_DISABLED;
4567         }
4568 
4569         if (sel <= max_sel_pel && pel <= max_sel_pel)
4570                 return USB3_LPM_DEVICE_INITIATED;
4571 
4572         if (sel > max_sel_pel)
4573                 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4574                                 "due to long SEL %llu ms\n",
4575                                 state_name, sel);
4576         else
4577                 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4578                                 "due to long PEL %llu ms\n",
4579                                 state_name, pel);
4580         return USB3_LPM_DISABLED;
4581 }
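/* In short: if both SEL and PEL fit within the per-state limit, the device
 * may still initiate U1/U2 entry itself even though hub-initiated LPM stays
 * off; otherwise LPM is disabled entirely for that state.
 */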
4582 
4583 
4584 /* Returns the Intel-specific U1 inactivity timeout for an endpoint, in ns.
4585  * Control and notification-type interrupt endpoints use a small multiple of
4586  * the U1 System Exit Latency (SEL); bulk endpoints tolerate a longer one.
4587  * Periodic endpoints must not enter U1 within 105% of their service
4588  * interval, and never sooner than twice the SEL, so the link is back in U0
4589  * before the next service opportunity.
4590  */
4591 static unsigned long long xhci_calculate_intel_u1_timeout(
4592                 struct usb_device *udev,
4593                 struct usb_endpoint_descriptor *desc)
4594 {
4595         unsigned long long timeout_ns;
4596         int ep_type;
4597         int intr_type;
4598 
4599         ep_type = usb_endpoint_type(desc);
4600         switch (ep_type) {
4601         case USB_ENDPOINT_XFER_CONTROL:
4602                 timeout_ns = udev->u1_params.sel * 3;
4603                 break;
4604         case USB_ENDPOINT_XFER_BULK:
4605                 timeout_ns = udev->u1_params.sel * 5;
4606                 break;
4607         case USB_ENDPOINT_XFER_INT:
4608                 intr_type = usb_endpoint_interrupt_type(desc);
4609                 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4610                         timeout_ns = udev->u1_params.sel * 3;
4611                         break;
4612                 }
4613                 /* Otherwise the calculation is the same as isoc eps */
4614                 /* fall through */
4615         case USB_ENDPOINT_XFER_ISOC:
4616                 timeout_ns = xhci_service_interval_to_ns(desc);
4617                 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4618                 if (timeout_ns < udev->u1_params.sel * 2)
4619                         timeout_ns = udev->u1_params.sel * 2;
4620                 break;
4621         default:
4622                 return 0;
4623         }
4624 
4625         return timeout_ns;
4626 }
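/* Worked example (Intel hosts, hypothetical values): with u1_params.sel of
 * 2100 ns, a bulk endpoint gets 5 * 2100 = 10500 ns, which
 * xhci_calculate_u1_timeout() below rounds up to an 11 us hub-encoded
 * timeout.
 */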
4627 
4628 /* Returns the hub-encoded U1 timeout value. */
4629 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4630                 struct usb_device *udev,
4631                 struct usb_endpoint_descriptor *desc)
4632 {
4633         unsigned long long timeout_ns;
4634 
4635         /* Prevent U1 if service interval is shorter than U1 exit latency */
4636         if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4637                 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4638                         dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4639                         return USB3_LPM_DISABLED;
4640                 }
4641         }
4642 
4643         if (xhci->quirks & XHCI_INTEL_HOST)
4644                 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4645         else
4646                 timeout_ns = udev->u1_params.sel;
4647 
4648         /* The U1 timeout is encoded in 1us intervals.
4649          * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4650          */
4651         if (timeout_ns == USB3_LPM_DISABLED)
4652                 timeout_ns = 1;
4653         else
4654                 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4655 
4656         /* If the necessary timeout value is bigger than what we can set in the
4657          * USB 3.0 hub, we have to disable hub-initiated U1.
4658          */
4659         if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4660                 return timeout_ns;
4661         dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4662                         "due to long timeout %llu us\n", timeout_ns);
4663         return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4664 }
4665 
4666 /* The U2 timeout should be the maximum of:
4667  *  - 10 ms (to avoid the bandwidth impact on the scheduler)
4668  *  - largest bInterval of any active periodic endpoint (to avoid going
4669  *    into lower power link states between intervals).
4670  *  - the U2 Exit Latency of the device
4671  */
4672 static unsigned long long xhci_calculate_intel_u2_timeout(
4673                 struct usb_device *udev,
4674                 struct usb_endpoint_descriptor *desc)
4675 {
4676         unsigned long long timeout_ns;
4677         unsigned long long u2_del_ns;
4678 
4679         timeout_ns = 10 * 1000 * 1000;
4680 
4681         if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4682                         (xhci_service_interval_to_ns(desc) > timeout_ns))
4683                 timeout_ns = xhci_service_interval_to_ns(desc);
4684 
4685         u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4686         if (u2_del_ns > timeout_ns)
4687                 timeout_ns = u2_del_ns;
4688 
4689         return timeout_ns;
4690 }
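/* Worked example (Intel hosts): an isoc endpoint with bInterval = 8 has a
 * (1 << 7) * 125 us = 16 ms service interval, which exceeds the 10 ms floor,
 * so 16 ms becomes the timeout unless bU2DevExitLat is even larger.
 */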
4691 
4692 /* Returns the hub-encoded U2 timeout value. */
4693 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4694                 struct usb_device *udev,
4695                 struct usb_endpoint_descriptor *desc)
4696 {
4697         unsigned long long timeout_ns;
4698 
4699         /* Prevent U2 if service interval is shorter than U2 exit latency */
4700         if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4701                 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4702                         dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4703                         return USB3_LPM_DISABLED;
4704                 }
4705         }
4706 
4707         if (xhci->quirks & XHCI_INTEL_HOST)
4708                 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4709         else
4710                 timeout_ns = udev->u2_params.sel;
4711 
4712         /* The U2 timeout is encoded in 256us intervals */
4713         timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4714         /* If the necessary timeout value is bigger than what we can set in the
4715          * USB 3.0 hub, we have to disable hub-initiated U2.
4716          */
4717         if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4718                 return timeout_ns;
4719         dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4720                         "due to long timeout %llu * 256 us\n", timeout_ns);
4721         return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4722 }
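/* Encoding example: the 10 ms Intel floor becomes
 * DIV_ROUND_UP(10000000, 256000) = 40, well under USB3_LPM_U2_MAX_TIMEOUT,
 * so hub-initiated U2 is enabled with a timeout of 40 * 256 us.
 */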
4723 
4724 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4725                 struct usb_device *udev,
4726                 struct usb_endpoint_descriptor *desc,
4727                 enum usb3_link_state state,
4728                 u16 *timeout)
4729 {
4730         if (state == USB3_LPM_U1)
4731                 return xhci_calculate_u1_timeout(xhci, udev, desc);
4732         else if (state == USB3_LPM_U2)
4733                 return xhci_calculate_u2_timeout(xhci, udev, desc);
4734 
4735         return USB3_LPM_DISABLED;
4736 }
4737 
4738 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4739                 struct usb_device *udev,
4740                 struct usb_endpoint_descriptor *desc,
4741                 enum usb3_link_state state,
4742                 u16 *timeout)
4743 {
4744         u16 alt_timeout;
4745 
4746         alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4747                 desc, state, timeout);
4748 
4749         /* If we found we can't enable hub-initiated LPM, and
4750          * the U1 or U2 exit latency was too high to allow
4751          * device-initiated LPM as well, then we will disable LPM
4752          * for this device, so stop searching any further.
4753          */
4754         if (alt_timeout == USB3_LPM_DISABLED) {
4755                 *timeout = alt_timeout;
4756                 return -E2BIG;
4757         }
4758         if (alt_timeout > *timeout)
4759                 *timeout = alt_timeout;
4760         return 0;
4761 }
4762 
4763 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4764                 struct usb_device *udev,
4765                 struct usb_host_interface *alt,
4766                 enum usb3_link_state state,
4767                 u16 *timeout)
4768 {
4769         int j;
4770 
4771         for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4772                 if (xhci_update_timeout_for_endpoint(xhci, udev,
4773                                         &alt->endpoint[j].desc, state, timeout))
4774                         return -E2BIG;
4776         }
4777         return 0;
4778 }
4779 
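     /* Intel hosts permit U2 at any tier, but allow U1 only for devices plugged
      * directly into the root hub or into a first-tier hub.
      */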
4780 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4781                 enum usb3_link_state state)
4782 {
4783         struct usb_device *parent;
4784         unsigned int num_hubs;
4785 
4786         if (state == USB3_LPM_U2)
4787                 return 0;
4788 
4789         /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4790         for (parent = udev->parent, num_hubs = 0; parent->parent;
4791                         parent = parent->parent)
4792                 num_hubs++;
4793 
4794         if (num_hubs < 2)
4795                 return 0;
4796 
4797         dev_dbg(&udev->dev,
4798                         "Disabling U1 link state for device below second-tier hub.\n");
4799         dev_dbg(&udev->dev,
4800                         "Plug device into first-tier hub to decrease power consumption.\n");
4801         return -E2BIG;
4802 }
4803 
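     /* Tier restrictions are vendor specific; only Intel hosts impose one. */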
4804 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4805                 struct usb_device *udev,
4806                 enum usb3_link_state state)
4807 {
4808         if (xhci->quirks & XHCI_INTEL_HOST)
4809                 return xhci_check_intel_tier_policy(udev, state);
4810         else
4811                 return 0;
4812 }
4813 
4814 /* Returns the U1 or U2 timeout that should be enabled.
4815  * If the tier check or timeout setting functions return with a non-zero exit
4816  * code, that means the timeout value has been finalized and we shouldn't look
4817  * at any more endpoints.
4818  */
4819 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4820                         struct usb_device *udev, enum usb3_link_state state)
4821 {
4822         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4823         struct usb_host_config *config;
4824         char *state_name;
4825         int i;
4826         u16 timeout = USB3_LPM_DISABLED;
4827 
4828         if (state == USB3_LPM_U1)
4829                 state_name = "U1";
4830         else if (state == USB3_LPM_U2)
4831                 state_name = "U2";
4832         else {
4833                 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4834                                 state);
4835                 return timeout;
4836         }
4837 
4838         if (xhci_check_tier_policy(xhci, udev, state) < 0)
4839                 return timeout;
4840 
4841         /* Gather some information about the currently installed configuration
4842          * and alternate interface settings.
4843          */
4844         if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4845                         state, &timeout))
4846                 return timeout;
4847 
4848         config = udev->actconfig;
4849         if (!config)
4850                 return timeout;
4851 
4852         for (i = 0; i < config->desc.bNumInterfaces; i++) {
4853                 struct usb_driver *driver;
4854                 struct usb_interface *intf = config->interface[i];
4855 
4856                 if (!intf)
4857                         continue;
4858 
4859                 /* Check if any of the interface drivers have
4860                  * disabled hub-initiated LPM.
4861                  */
4862                 if (intf->dev.driver) {
4863                         driver = to_usb_driver(intf->dev.driver);
4864                         if (driver && driver->disable_hub_initiated_lpm) {
4865                                 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4866                                         state_name, driver->name);
4867                                 timeout = xhci_get_timeout_no_hub_lpm(udev,
4868                                                                       state);
4869                                 if (timeout == USB3_LPM_DISABLED)
4870                                         return timeout;
4871                         }
4872                 }
4873 
4874                 /* Not sure how this could happen... */
4875                 if (!intf->cur_altsetting)
4876                         continue;
4877 
4878                 if (xhci_update_timeout_for_interface(xhci, udev,
4879                                         intf->cur_altsetting,
4880                                         state, &timeout))
4881                         return timeout;
4882         }
4883         return timeout;
4884 }
4885 
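     /* Work out the Max Exit Latency (in microseconds) the xHC should expect
      * once the proposed timeout change takes effect: the larger of the U1 and
      * U2 exit latencies that will still be enabled afterwards.
      */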
4886 static int calculate_max_exit_latency(struct usb_device *udev,
4887                 enum usb3_link_state state_changed,
4888                 u16 hub_encoded_timeout)
4889 {
4890         unsigned long long u1_mel_us = 0;
4891         unsigned long long u2_mel_us = 0;
4892         unsigned long long mel_us = 0;
4893         bool disabling_u1;
4894         bool disabling_u2;
4895         bool enabling_u1;
4896         bool enabling_u2;
4897 
4898         disabling_u1 = (state_changed == USB3_LPM_U1 &&
4899                         hub_encoded_timeout == USB3_LPM_DISABLED);
4900         disabling_u2 = (state_changed == USB3_LPM_U2 &&
4901                         hub_encoded_timeout == USB3_LPM_DISABLED);
4902 
4903         enabling_u1 = (state_changed == USB3_LPM_U1 &&
4904                         hub_encoded_timeout != USB3_LPM_DISABLED);
4905         enabling_u2 = (state_changed == USB3_LPM_U2 &&
4906                         hub_encoded_timeout != USB3_LPM_DISABLED);
4907 
4908         /* If U1 was already enabled and we're not disabling it,
4909          * or we're going to enable U1, account for the U1 max exit latency.
4910          */
4911         if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4912                         enabling_u1)
4913                 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4914         if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4915                         enabling_u2)
4916                 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4917 
4918         if (u1_mel_us > u2_mel_us)
4919                 mel_us = u1_mel_us;
4920         else
4921                 mel_us = u2_mel_us;
4922         /* xHCI host controller max exit latency field is only 16 bits wide. */
4923         if (mel_us > MAX_EXIT) {
4924                 dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
4925                                 mel_us);
4926                 return -E2BIG;
4927         }
4928         return mel_us;
4929 }
4930 
4931 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4932 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4933                         struct usb_device *udev, enum usb3_link_state state)
4934 {
4935         struct xhci_hcd *xhci;
4936         u16 hub_encoded_timeout;
4937         int mel;
4938         int ret;
4939 
4940         xhci = hcd_to_xhci(hcd);
4941         /* The LPM timeout values are pretty host-controller specific, so don't
4942          * enable hub-initiated timeouts unless the vendor has provided
4943          * information about their timeout algorithm.
4944          */
4945         if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4946                         !xhci->devs[udev->slot_id])
4947                 return USB3_LPM_DISABLED;
4948 
4949         hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4950         mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4951         if (mel < 0) {
4952                 /* Max Exit Latency is too big, disable LPM. */
4953                 hub_encoded_timeout = USB3_LPM_DISABLED;
4954                 mel = 0;
4955         }
4956 
4957         ret = xhci_change_max_exit_latency(xhci, udev, mel);
4958         if (ret)
4959                 return ret;
4960         return hub_encoded_timeout;
4961 }
4962 
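     /* Hub-initiated LPM is being turned off for this link state, so recompute
      * the max exit latency without it and program the slot accordingly.
      */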
4963 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4964                         struct usb_device *udev, enum usb3_link_state state)
4965 {
4966         struct xhci_hcd *xhci;
4967         u16 mel;
4968 
4969         xhci = hcd_to_xhci(hcd);
4970         if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4971                         !xhci->devs[udev->slot_id])
4972                 return 0;
4973 
4974         mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4975         return xhci_change_max_exit_latency(xhci, udev, mel);
4976 }
4977 #else /* CONFIG_PM */
4978 
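     /* With CONFIG_PM unset the driver never enables LPM; these stubs keep the
      * hc_driver method table valid.
      */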
4979 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4980                                 struct usb_device *udev, int enable)
4981 {
4982         return 0;
4983 }
4984 
4985 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4986 {
4987         return 0;
4988 }
4989 
4990 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4991                         struct usb_device *udev, enum usb3_link_state state)
4992 {
4993         return USB3_LPM_DISABLED;
4994 }
4995 
4996 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4997                         struct usb_device *udev, enum usb3_link_state state)
4998 {
4999         return 0;
5000 }
5001 #endif /* CONFIG_PM */
5002 
5003 /*-------------------------------------------------------------------------*/
5004 
5005 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5006  * internal data structures for the device.
5007  */
5008 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5009                         struct usb_tt *tt, gfp_t mem_flags)
5010 {
5011         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5012         struct xhci_virt_device *vdev;
5013         struct xhci_command *config_cmd;
5014         struct xhci_input_control_ctx *ctrl_ctx;
5015         struct xhci_slot_ctx *slot_ctx;
5016         unsigned long flags;
5017         unsigned think_time;
5018         int ret;
5019 
5020         /* Ignore root hubs */
5021         if (!hdev->parent)
5022                 return 0;
5023 
5024         vdev = xhci->devs[hdev->slot_id];
5025         if (!vdev) {
5026                 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5027                 return -EINVAL;
5028         }
5029 
5030         config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5031         if (!config_cmd)
5032                 return -ENOMEM;
5033 
5034         ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5035         if (!ctrl_ctx) {
5036                 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5037                                 __func__);
5038                 xhci_free_command(xhci, config_cmd);
5039                 return -ENOMEM;
5040         }
5041 
5042         spin_lock_irqsave(&xhci->lock, flags);
5043         if (hdev->speed == USB_SPEED_HIGH &&
5044                         xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5045                 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5046                 xhci_free_command(xhci, config_cmd);
5047                 spin_unlock_irqrestore(&xhci->lock, flags);
5048                 return -ENOMEM;
5049         }
5050 
5051         xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5052         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5053         slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5054         slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5055         /*
5056          * Refer to xHCI section 6.2.2: MTT should be 0 for full speed hubs,
5057          * but it may already be set to 1 when an xHCI virtual device is set
5058          * up, so clear it anyway.
5059          */
5060         if (tt->multi)
5061                 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5062         else if (hdev->speed == USB_SPEED_FULL)
5063                 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5064 
5065         if (xhci->hci_version > 0x95) {
5066                 xhci_dbg(xhci,
5067                                 "xHCI version %x needs hub TT think time and number of ports\n",
5068                                 (unsigned int) xhci->hci_version);
5069                 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5070                 /* Set TT think time - convert from ns to FS bit times.
5071                  * 0 = 8 FS bit times, 1 = 16 FS bit times,
5072                  * 2 = 24 FS bit times, 3 = 32 FS bit times.
5073                  *
5074                  * xHCI 1.0: this field shall be 0 if the device
5075                  * is not a high-speed hub.
5076                  */
5077                 think_time = tt->think_time;
5078                 if (think_time != 0)
5079                         think_time = (think_time / 666) - 1;
5080                 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5081                         slot_ctx->tt_info |=
5082                                 cpu_to_le32(TT_THINK_TIME(think_time));
5083         } else {
5084                 xhci_dbg(xhci,
5085                                 "xHCI version %x doesn't need hub TT think time or number of ports\n",
5086                                 (unsigned int) xhci->hci_version);
5087         }
5088         slot_ctx->dev_state = 0;
5089         spin_unlock_irqrestore(&xhci->lock, flags);
5090 
5091         xhci_dbg(xhci, "Set up %s for hub device.\n",
5092                         (xhci->hci_version > 0x95) ?
5093                         "configure endpoint" : "evaluate context");
5094 
5095         /* Issue and wait for the configure endpoint or
5096          * evaluate context command.
5097          */
5098         if (xhci->hci_version > 0x95)
5099                 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5100                                 false, false);
5101         else
5102                 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5103                                 true, false);
5104 
5105         xhci_free_command(xhci, config_cmd);
5106         return ret;
5107 }
5108 
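     /* The microframe index register counts 125us microframes; eight of them
      * make up one 1ms frame, hence the shift by three.
      */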
5109 static int xhci_get_frame(struct usb_hcd *hcd)
5110 {
5111         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5112         /* EHCI mods by the periodic size.  Why? */
5113         return readl(&xhci->run_regs->microframe_index) >> 3;
5114 }
5115 
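     /* Shared setup path: the bus glue (e.g. xhci-pci, xhci-plat) calls this
      * once per hcd to cache capability registers, apply quirks, configure the
      * DMA masks and run xhci_init().
      */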
5116 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5117 {
5118         struct xhci_hcd         *xhci;
5119         /*
5120          * TODO: Check with DWC3 clients for sysdev according to
5121          * quirks
5122          */
5123         struct device           *dev = hcd->self.sysdev;
5124         unsigned int            minor_rev;
5125         int                     retval;
5126 
5127         /* Accept arbitrarily long scatter-gather lists */
5128         hcd->self.sg_tablesize = ~0;
5129 
5130         /* support to build packet from discontinuous buffers */
5131         hcd->self.no_sg_constraint = 1;
5132 
5133         /* XHCI controllers don't stop the ep queue on short packets :| */
5134         hcd->self.no_stop_on_short = 1;
5135 
5136         xhci = hcd_to_xhci(hcd);
5137 
5138         if (usb_hcd_is_primary_hcd(hcd)) {
5139                 xhci->main_hcd = hcd;
5140                 xhci->usb2_rhub.hcd = hcd;
5141                 /* Mark the first roothub as being USB 2.0.
5142                  * The xHCI driver will register the USB 3.0 roothub.
5143                  */
5144                 hcd->speed = HCD_USB2;
5145                 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5146                 /*
5147                  * USB 2.0 roothub under xHCI has an integrated TT,
5148                  * (rate matching hub) as opposed to having an OHCI/UHCI
5149                  * companion controller.
5150                  */
5151                 hcd->has_tt = 1;
5152         } else {
5153                 /*
5154                  * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
5155                  * should return 0x31 for sbrn, or that the minor revision
5156                  * is a two digit BCD containing minor and sub-minor numbers.
5157                  * This was later clarified in xHCI 1.2.
5158                  *
5159                  * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5160                  * minor revision set to 0x1 instead of 0x10.
5161                  */
5162                 if (xhci->usb3_rhub.min_rev == 0x1)
5163                         minor_rev = 1;
5164                 else
5165                         minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5166 
5167                 switch (minor_rev) {
5168                 case 2:
5169                         hcd->speed = HCD_USB32;
5170                         hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5171                         hcd->self.root_hub->rx_lanes = 2;
5172                         hcd->self.root_hub->tx_lanes = 2;
5173                         break;
5174                 case 1:
5175                         hcd->speed = HCD_USB31;
5176                         hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5177                         break;
5178                 }
5179                 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5180                           minor_rev,
5181                           minor_rev ? "Enhanced " : "");
5182 
5183                 xhci->usb3_rhub.hcd = hcd;
5184                 /* xHCI private pointer was set in xhci_pci_probe for the second
5185                  * registered roothub.
5186                  */
5187                 return 0;
5188         }
5189 
5190         mutex_init(&xhci->mutex);
5191         xhci->cap_regs = hcd->regs;
5192         xhci->op_regs = hcd->regs +
5193                 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5194         xhci->run_regs = hcd->regs +
5195                 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5196         /* Cache read-only capability registers */
5197         xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5198         xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5199         xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5200         xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5201         xhci->hci_version = HC_VERSION(xhci->hcc_params);
5202         xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5203         if (xhci->hci_version > 0x100)
5204                 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5205 
5206         xhci->quirks |= quirks;
5207 
5208         get_quirks(dev, xhci);
5209 
5210         /* xHCI controllers that follow the xHCI 1.0 spec can signal a spurious
5211          * success event after a short transfer. This quirk makes the driver
5212          * ignore such spurious events.
5213          */
5214         if (xhci->hci_version > 0x96)
5215                 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5216 
5217         /* Make sure the HC is halted. */
5218         retval = xhci_halt(xhci);
5219         if (retval)
5220                 return retval;
5221 
5222         xhci_zero_64b_regs(xhci);
5223 
5224         xhci_dbg(xhci, "Resetting HCD\n");
5225         /* Reset the internal HC memory state and registers. */
5226         retval = xhci_reset(xhci);
5227         if (retval)
5228                 return retval;
5229         xhci_dbg(xhci, "Reset complete\n");
5230 
5231         /*
5232          * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5233          * of HCCPARAMS1 is set even though the controller cannot actually
5234          * address 64-bit memory. Clear the AC64 bit of xhci->hcc_params so
5235          * that dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) is used further
5236          * down in this function.
5237          */
5238         if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5239                 xhci->hcc_params &= ~BIT(0);
5240 
5241         /* Set dma_mask and coherent_dma_mask to 64-bits,
5242          * if xHC supports 64-bit addressing */
5243         if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5244                         !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5245                 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5246                 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5247         } else {
5248                 /*
5249                  * This is to avoid error in cases where a 32-bit USB
5250                  * controller is used on a 64-bit capable system.
5251                  */
5252                 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5253                 if (retval)
5254                         return retval;
5255                 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5256                 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5257         }
5258 
5259         xhci_dbg(xhci, "Calling HCD init\n");
5260         /* Initialize HCD and host controller data structures. */
5261         retval = xhci_init(hcd);
5262         if (retval)
5263                 return retval;
5264         xhci_dbg(xhci, "Called HCD init\n");
5265 
5266         xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5267                   xhci->hcc_params, xhci->hci_version, xhci->quirks);
5268 
5269         return 0;
5270 }
5271 EXPORT_SYMBOL_GPL(xhci_gen_setup);
5272 
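     /* Called by usbcore when a Clear-TT-Buffer request for this endpoint has
      * completed: clear EP_CLEARING_TT and restart any rings the endpoint owns.
      */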
5273 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5274                 struct usb_host_endpoint *ep)
5275 {
5276         struct xhci_hcd *xhci;
5277         struct usb_device *udev;
5278         unsigned int slot_id;
5279         unsigned int ep_index;
5280         unsigned long flags;
5281 
5282         xhci = hcd_to_xhci(hcd);
5283 
5284         spin_lock_irqsave(&xhci->lock, flags);
5285         udev = (struct usb_device *)ep->hcpriv;
5286         slot_id = udev->slot_id;
5287         ep_index = xhci_get_endpoint_index(&ep->desc);
5288 
5289         xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5290         xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5291         spin_unlock_irqrestore(&xhci->lock, flags);
5292 }
5293 
5294 static const struct hc_driver xhci_hc_driver = {
5295         .description =          "xhci-hcd",
5296         .product_desc =         "xHCI Host Controller",
5297         .hcd_priv_size =        sizeof(struct xhci_hcd),
5298 
5299         /*
5300          * generic hardware linkage
5301          */
5302         .irq =                  xhci_irq,
5303         .flags =                HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
5304 
5305         /*
5306          * basic lifecycle operations
5307          */
5308         .reset =                NULL, /* set in xhci_init_driver() */
5309         .start =                xhci_run,
5310         .stop =                 xhci_stop,
5311         .shutdown =             xhci_shutdown,
5312 
5313         /*
5314          * managing i/o requests and associated device resources
5315          */
5316         .map_urb_for_dma =      xhci_map_urb_for_dma,
5317         .urb_enqueue =          xhci_urb_enqueue,
5318         .urb_dequeue =          xhci_urb_dequeue,
5319         .alloc_dev =            xhci_alloc_dev,
5320         .free_dev =             xhci_free_dev,
5321         .alloc_streams =        xhci_alloc_streams,
5322         .free_streams =         xhci_free_streams,
5323         .add_endpoint =         xhci_add_endpoint,
5324         .drop_endpoint =        xhci_drop_endpoint,
5325         .endpoint_disable =     xhci_endpoint_disable,
5326         .endpoint_reset =       xhci_endpoint_reset,
5327         .check_bandwidth =      xhci_check_bandwidth,
5328         .reset_bandwidth =      xhci_reset_bandwidth,
5329         .address_device =       xhci_address_device,
5330         .enable_device =        xhci_enable_device,
5331         .update_hub_device =    xhci_update_hub_device,
5332         .reset_device =         xhci_discover_or_reset_device,
5333 
5334         /*
5335          * scheduling support
5336          */
5337         .get_frame_number =     xhci_get_frame,
5338 
5339         /*
5340          * root hub support
5341          */
5342         .hub_control =          xhci_hub_control,
5343         .hub_status_data =      xhci_hub_status_data,
5344         .bus_suspend =          xhci_bus_suspend,
5345         .bus_resume =           xhci_bus_resume,
5346         .get_resuming_ports =   xhci_get_resuming_ports,
5347 
5348         /*
5349          * call back when device connected and addressed
5350          */
5351         .update_device =        xhci_update_device,
5352         .set_usb2_hw_lpm =      xhci_set_usb2_hardware_lpm,
5353         .enable_usb3_lpm_timeout =      xhci_enable_usb3_lpm_timeout,
5354         .disable_usb3_lpm_timeout =     xhci_disable_usb3_lpm_timeout,
5355         .find_raw_port_number = xhci_find_raw_port_number,
5356         .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5357 };
5358 
5359 void xhci_init_driver(struct hc_driver *drv,
5360                       const struct xhci_driver_overrides *over)
5361 {
5362         BUG_ON(!over);
5363 
5364         /* Copy the generic table to drv then apply the overrides */
5365         *drv = xhci_hc_driver;
5366 
5367         if (over) {
5368                 drv->hcd_priv_size += over->extra_priv_size;
5369                 if (over->reset)
5370                         drv->reset = over->reset;
5371                 if (over->start)
5372                         drv->start = over->start;
5373         }
5374 }
5375 EXPORT_SYMBOL_GPL(xhci_init_driver);
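
     /* Typical use from a glue driver (illustrative sketch only; the override
      * and private-struct names below are hypothetical):
      *
      *	static const struct xhci_driver_overrides my_overrides __initconst = {
      *		.extra_priv_size = sizeof(struct my_priv),
      *		.reset = my_glue_setup,
      *	};
      *
      *	static struct hc_driver my_hc_driver;
      *
      *	xhci_init_driver(&my_hc_driver, &my_overrides);
      */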
5376 
5377 MODULE_DESCRIPTION(DRIVER_DESC);
5378 MODULE_AUTHOR(DRIVER_AUTHOR);
5379 MODULE_LICENSE("GPL");
5380 
5381 static int __init xhci_hcd_init(void)
5382 {
5383         /*
5384          * Check the compiler generated sizes of structures that must be laid
5385          * out in specific ways for hardware access.
5386          */
5387         BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5388         BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5389         BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5390         /* xhci_device_control has eight fields, and also
5391          * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5392          */
5393         BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5394         BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5395         BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5396         BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5397         BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5398         /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5399         BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5400 
5401         if (usb_disabled())
5402                 return -ENODEV;
5403 
5404         xhci_debugfs_create_root();
5405 
5406         return 0;
5407 }
5408 
5409 /*
5410  * If an init function is provided, an exit function must also be provided
5411  * to allow module unload.
5412  */
5413 static void __exit xhci_hcd_fini(void)
5414 {
5415         xhci_debugfs_remove_root();
5416 }
5417 
5418 module_init(xhci_hcd_init);
5419 module_exit(xhci_hcd_fini);