Lines Matching full:xhci
3 * xHCI host controller driver
21 #include "xhci.h"
22 #include "xhci-trace.h"
23 #include "xhci-debugfs.h"
24 #include "xhci-dbgcap.h"
84 * Disable interrupts and begin the xHCI halting process.
86 void xhci_quiesce(struct xhci_hcd *xhci) in xhci_quiesce() argument
93 halted = readl(&xhci->op_regs->status) & STS_HALT; in xhci_quiesce()
97 cmd = readl(&xhci->op_regs->command); in xhci_quiesce()
99 writel(cmd, &xhci->op_regs->command); in xhci_quiesce()
110 int xhci_halt(struct xhci_hcd *xhci) in xhci_halt() argument
113 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); in xhci_halt()
114 xhci_quiesce(xhci); in xhci_halt()
116 ret = xhci_handshake(&xhci->op_regs->status, in xhci_halt()
119 xhci_warn(xhci, "Host halt failed, %d\n", ret); in xhci_halt()
122 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_halt()
123 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_halt()
130 int xhci_start(struct xhci_hcd *xhci) in xhci_start() argument
135 temp = readl(&xhci->op_regs->command); in xhci_start()
137 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", in xhci_start()
139 writel(temp, &xhci->op_regs->command); in xhci_start()
145 ret = xhci_handshake(&xhci->op_regs->status, in xhci_start()
148 xhci_err(xhci, "Host took too long to start, " in xhci_start()
153 xhci->xhc_state = 0; in xhci_start()
165 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) in xhci_reset() argument
171 state = readl(&xhci->op_regs->status); in xhci_reset()
174 xhci_warn(xhci, "Host not accessible, reset failed.\n"); in xhci_reset()
179 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); in xhci_reset()
183 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); in xhci_reset()
184 command = readl(&xhci->op_regs->command); in xhci_reset()
186 writel(command, &xhci->op_regs->command); in xhci_reset()
188 /* Existing Intel xHCI controllers require a delay of 1 ms, in xhci_reset()
195 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_reset()
198 ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); in xhci_reset()
202 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_reset()
203 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); in xhci_reset()
205 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_reset()
208 * xHCI cannot write to any doorbells or operational registers other in xhci_reset()
211 ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); in xhci_reset()
213 xhci->usb2_rhub.bus_state.port_c_suspend = 0; in xhci_reset()
214 xhci->usb2_rhub.bus_state.suspended_ports = 0; in xhci_reset()
215 xhci->usb2_rhub.bus_state.resuming_ports = 0; in xhci_reset()
216 xhci->usb3_rhub.bus_state.port_c_suspend = 0; in xhci_reset()
217 xhci->usb3_rhub.bus_state.suspended_ports = 0; in xhci_reset()
218 xhci->usb3_rhub.bus_state.resuming_ports = 0; in xhci_reset()
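The halt and reset fragments above share one pattern: write a bit into the command register, then spin on a status or command register with xhci_handshake() until the controller acknowledges or a timeout expires. A minimal user-space model of that bounded poll, with a simulated register and an illustrative poll_register() helper (the names and the 1000-iteration budget are made up, not the driver's API):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Simulated 32-bit MMIO register; a real driver would use readl()/writel(). */
static volatile uint32_t sim_status;

/* Poll until (reg & mask) == expected, giving up after max_iters tries. */
static bool poll_register(volatile uint32_t *reg, uint32_t mask,
                          uint32_t expected, unsigned int max_iters)
{
        while (max_iters--) {
                if ((*reg & mask) == expected)
                        return true;
                /* A driver would udelay()/sleep between reads here. */
        }
        return false;
}

#define STS_HALT_BIT (1u << 0)  /* illustrative stand-in for STS_HALT */

int main(void)
{
        sim_status = 0;                 /* controller still running */
        sim_status |= STS_HALT_BIT;     /* pretend the HC halted */

        if (poll_register(&sim_status, STS_HALT_BIT, STS_HALT_BIT, 1000))
                printf("controller reported halted\n");
        else
                printf("halt timed out\n");
        return 0;
}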
223 static void xhci_zero_64b_regs(struct xhci_hcd *xhci) in xhci_zero_64b_regs() argument
225 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_zero_64b_regs()
244 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) in xhci_zero_64b_regs()
247 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); in xhci_zero_64b_regs()
250 val = readl(&xhci->op_regs->command); in xhci_zero_64b_regs()
252 writel(val, &xhci->op_regs->command); in xhci_zero_64b_regs()
255 val = readl(&xhci->op_regs->status); in xhci_zero_64b_regs()
257 writel(val, &xhci->op_regs->status); in xhci_zero_64b_regs()
260 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
262 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
263 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
265 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
267 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1), in xhci_zero_64b_regs()
268 ARRAY_SIZE(xhci->run_regs->ir_set)); in xhci_zero_64b_regs()
273 ir = &xhci->run_regs->ir_set[i]; in xhci_zero_64b_regs()
274 val = xhci_read_64(xhci, &ir->erst_base); in xhci_zero_64b_regs()
276 xhci_write_64(xhci, 0, &ir->erst_base); in xhci_zero_64b_regs()
277 val = xhci_read_64(xhci, &ir->erst_dequeue); in xhci_zero_64b_regs()
279 xhci_write_64(xhci, 0, &ir->erst_dequeue); in xhci_zero_64b_regs()
283 err = xhci_handshake(&xhci->op_regs->status, in xhci_zero_64b_regs()
287 xhci_info(xhci, "Fault detected\n"); in xhci_zero_64b_regs()
294 static int xhci_setup_msi(struct xhci_hcd *xhci) in xhci_setup_msi() argument
300 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_setup_msi()
304 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
310 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msi()
312 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
323 static int xhci_setup_msix(struct xhci_hcd *xhci) in xhci_setup_msix() argument
326 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_setup_msix()
332 * with max number of interrupters based on the xhci HCSPARAMS1. in xhci_setup_msix()
336 xhci->msix_count = min(num_online_cpus() + 1, in xhci_setup_msix()
337 HCS_MAX_INTRS(xhci->hcs_params1)); in xhci_setup_msix()
339 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, in xhci_setup_msix()
342 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msix()
347 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
349 "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msix()
358 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); in xhci_setup_msix()
360 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_setup_msix()
366 static void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
368 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cleanup_msix()
371 if (xhci->quirks & XHCI_PLAT) in xhci_cleanup_msix()
381 for (i = 0; i < xhci->msix_count; i++) in xhci_cleanup_msix()
382 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
384 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
391 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
393 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_msix_sync_irqs()
399 for (i = 0; i < xhci->msix_count; i++) in xhci_msix_sync_irqs()
406 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_try_enable_msi() local
410 /* The xhci platform device has set up IRQs through usb_add_hcd. */ in xhci_try_enable_msi()
411 if (xhci->quirks & XHCI_PLAT) in xhci_try_enable_msi()
414 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_try_enable_msi()
419 if (xhci->quirks & XHCI_BROKEN_MSI) in xhci_try_enable_msi()
427 ret = xhci_setup_msix(xhci); in xhci_try_enable_msi()
430 ret = xhci_setup_msi(xhci); in xhci_try_enable_msi()
438 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); in xhci_try_enable_msi()
451 xhci_err(xhci, "request interrupt %d failed\n", in xhci_try_enable_msi()
466 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
470 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
478 struct xhci_hcd *xhci; in compliance_mode_recovery() local
484 xhci = from_timer(xhci, t, comp_mode_recovery_timer); in compliance_mode_recovery()
485 rhub = &xhci->usb3_rhub; in compliance_mode_recovery()
494 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
497 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
499 hcd = xhci->shared_hcd; in compliance_mode_recovery()
508 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1)) in compliance_mode_recovery()
509 mod_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery()
520 * status event is generated when entering compliance mode (per xhci spec),
523 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) in compliance_mode_recovery_timer_init() argument
525 xhci->port_status_u0 = 0; in compliance_mode_recovery_timer_init()
526 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery, in compliance_mode_recovery_timer_init()
528 xhci->comp_mode_recovery_timer.expires = jiffies + in compliance_mode_recovery_timer_init()
531 add_timer(&xhci->comp_mode_recovery_timer); in compliance_mode_recovery_timer_init()
532 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery_timer_init()
563 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) in xhci_all_ports_seen_u0() argument
565 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); in xhci_all_ports_seen_u0()
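compliance_mode_recovery() and xhci_all_ports_seen_u0() track which USB3 root-hub ports have been observed in U0 as a bitmask in port_status_u0; every port has been seen once the mask equals (1 << num_ports) - 1. A tiny standalone sketch of that bookkeeping (the port count and helper names are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mark a 0-based port as having reached U0. */
static void mark_port_u0(uint32_t *seen, unsigned int port)
{
        *seen |= 1u << port;
}

/* True once every one of num_ports ports has been marked. */
static int all_ports_seen_u0(uint32_t seen, unsigned int num_ports)
{
        return seen == ((1u << num_ports) - 1);
}

int main(void)
{
        uint32_t seen = 0;
        unsigned int num_ports = 4;     /* arbitrary example */

        mark_port_u0(&seen, 0);
        mark_port_u0(&seen, 2);
        printf("all seen? %d\n", all_ports_seen_u0(seen, num_ports)); /* 0 */

        mark_port_u0(&seen, 1);
        mark_port_u0(&seen, 3);
        printf("all seen? %d\n", all_ports_seen_u0(seen, num_ports)); /* 1 */
        return 0;
}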
578 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_init() local
581 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); in xhci_init()
582 spin_lock_init(&xhci->lock); in xhci_init()
583 if (xhci->hci_version == 0x95 && link_quirk) { in xhci_init()
584 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_init()
586 xhci->quirks |= XHCI_LINK_TRB_QUIRK; in xhci_init()
588 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_init()
589 "xHCI doesn't need link TRB QUIRK"); in xhci_init()
591 retval = xhci_mem_init(xhci, GFP_KERNEL); in xhci_init()
592 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); in xhci_init()
596 xhci->quirks |= XHCI_COMP_MODE_QUIRK; in xhci_init()
597 compliance_mode_recovery_timer_init(xhci); in xhci_init()
606 static int xhci_run_finished(struct xhci_hcd *xhci) in xhci_run_finished() argument
608 if (xhci_start(xhci)) { in xhci_run_finished()
609 xhci_halt(xhci); in xhci_run_finished()
612 xhci->shared_hcd->state = HC_STATE_RUNNING; in xhci_run_finished()
613 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_run_finished()
615 if (xhci->quirks & XHCI_NEC_HOST) in xhci_run_finished()
616 xhci_ring_cmd_db(xhci); in xhci_run_finished()
618 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run_finished()
640 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_run() local
642 /* Start the xHCI host controller running only after the USB 2.0 roothub in xhci_run()
648 return xhci_run_finished(xhci); in xhci_run()
650 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); in xhci_run()
656 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_run()
658 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
661 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
663 temp = readl(&xhci->ir_set->irq_control); in xhci_run()
665 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; in xhci_run()
666 writel(temp, &xhci->ir_set->irq_control); in xhci_run()
669 temp = readl(&xhci->op_regs->command); in xhci_run()
671 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
673 writel(temp, &xhci->op_regs->command); in xhci_run()
675 temp = readl(&xhci->ir_set->irq_pending); in xhci_run()
676 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
678 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); in xhci_run()
679 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); in xhci_run()
681 if (xhci->quirks & XHCI_NEC_HOST) { in xhci_run()
684 command = xhci_alloc_command(xhci, false, GFP_KERNEL); in xhci_run()
688 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, in xhci_run()
691 xhci_free_command(xhci, command); in xhci_run()
693 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
696 xhci_dbc_init(xhci); in xhci_run()
698 xhci_debugfs_init(xhci); in xhci_run()
705 * Stop xHCI driver.
716 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_stop() local
718 mutex_lock(&xhci->mutex); in xhci_stop()
722 mutex_unlock(&xhci->mutex); in xhci_stop()
726 xhci_dbc_exit(xhci); in xhci_stop()
728 spin_lock_irq(&xhci->lock); in xhci_stop()
729 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_stop()
730 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_stop()
731 xhci_halt(xhci); in xhci_stop()
732 xhci_reset(xhci, XHCI_RESET_SHORT_USEC); in xhci_stop()
733 spin_unlock_irq(&xhci->lock); in xhci_stop()
735 xhci_cleanup_msix(xhci); in xhci_stop()
738 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_stop()
739 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_stop()
740 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_stop()
741 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_stop()
746 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_stop()
749 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
751 temp = readl(&xhci->op_regs->status); in xhci_stop()
752 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_stop()
753 temp = readl(&xhci->ir_set->irq_pending); in xhci_stop()
754 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_stop()
756 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); in xhci_stop()
757 xhci_mem_cleanup(xhci); in xhci_stop()
758 xhci_debugfs_exit(xhci); in xhci_stop()
759 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
761 readl(&xhci->op_regs->status)); in xhci_stop()
762 mutex_unlock(&xhci->mutex); in xhci_stop()
776 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_shutdown() local
778 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) in xhci_shutdown()
782 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", in xhci_shutdown()
787 if (xhci->shared_hcd) { in xhci_shutdown()
788 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_shutdown()
789 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_shutdown()
792 spin_lock_irq(&xhci->lock); in xhci_shutdown()
793 xhci_halt(xhci); in xhci_shutdown()
799 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP || in xhci_shutdown()
800 xhci->quirks & XHCI_RESET_TO_DEFAULT) in xhci_shutdown()
801 xhci_reset(xhci, XHCI_RESET_SHORT_USEC); in xhci_shutdown()
803 spin_unlock_irq(&xhci->lock); in xhci_shutdown()
805 xhci_cleanup_msix(xhci); in xhci_shutdown()
807 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_shutdown()
809 readl(&xhci->op_regs->status)); in xhci_shutdown()
814 static void xhci_save_registers(struct xhci_hcd *xhci) in xhci_save_registers() argument
816 xhci->s3.command = readl(&xhci->op_regs->command); in xhci_save_registers()
817 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); in xhci_save_registers()
818 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_save_registers()
819 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); in xhci_save_registers()
820 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); in xhci_save_registers()
821 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); in xhci_save_registers()
822 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_save_registers()
823 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_save_registers()
824 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); in xhci_save_registers()
827 static void xhci_restore_registers(struct xhci_hcd *xhci) in xhci_restore_registers() argument
829 writel(xhci->s3.command, &xhci->op_regs->command); in xhci_restore_registers()
830 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); in xhci_restore_registers()
831 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); in xhci_restore_registers()
832 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); in xhci_restore_registers()
833 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); in xhci_restore_registers()
834 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); in xhci_restore_registers()
835 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); in xhci_restore_registers()
836 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); in xhci_restore_registers()
837 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); in xhci_restore_registers()
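xhci_save_registers() and xhci_restore_registers() copy the operational and interrupter registers into xhci->s3 before the controller may lose power, and write them back on resume. A minimal sketch of that snapshot pattern over a fake register block (the struct and field names are invented; a real driver does field-by-field readl()/writel()/xhci_read_64() accesses rather than a struct copy):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend MMIO block standing in for the operational registers. */
struct fake_op_regs {
        uint32_t command;
        uint32_t dev_notification;
        uint64_t dcbaa_ptr;
        uint32_t config_reg;
};

/* Snapshot kept in ordinary RAM across the power-down, like xhci->s3. */
static struct fake_op_regs saved;

static void save_registers(const struct fake_op_regs *regs)
{
        saved = *regs;          /* struct copy models the per-register reads */
}

static void restore_registers(struct fake_op_regs *regs)
{
        *regs = saved;          /* write the snapshot back after resume */
}

int main(void)
{
        struct fake_op_regs regs = { .command = 0x1, .dcbaa_ptr = 0x1000 };

        save_registers(&regs);
        memset(&regs, 0, sizeof(regs)); /* controller lost power */
        restore_registers(&regs);

        printf("command=0x%x dcbaa=0x%llx\n", (unsigned)regs.command,
               (unsigned long long)regs.dcbaa_ptr);
        return 0;
}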
840 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) in xhci_set_cmd_ring_deq() argument
845 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
847 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in xhci_set_cmd_ring_deq()
848 xhci->cmd_ring->dequeue) & in xhci_set_cmd_ring_deq()
850 xhci->cmd_ring->cycle_state; in xhci_set_cmd_ring_deq()
851 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_set_cmd_ring_deq()
854 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
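xhci_set_cmd_ring_deq() rebuilds the 64-bit command ring control value by keeping the register's low control/status bits, inserting the dequeue TRB's DMA address, and OR-ing in the ring cycle state; per the xHCI spec the ring pointer occupies bits 63:6. A sketch of that bit-merge with an assumed low-bits mask (the CTRL_BITS_MASK name and the example addresses are illustrative, not the driver's constants):

#include <stdint.h>
#include <stdio.h>

#define CTRL_BITS_MASK 0x3fULL  /* assumed: cycle state + control/status bits */

/* Combine preserved control bits, a 64-byte-aligned dequeue address, and cycle state. */
static uint64_t build_crcr(uint64_t old_val, uint64_t dequeue_dma, unsigned int cycle)
{
        return (old_val & CTRL_BITS_MASK) |
               (dequeue_dma & ~CTRL_BITS_MASK) |
               (cycle & 1);
}

int main(void)
{
        uint64_t old_val = 0x8;                 /* e.g. a preserved status bit */
        uint64_t dequeue_dma = 0x12345680;      /* 64-byte-aligned TRB address */

        printf("crcr = 0x%llx\n",
               (unsigned long long)build_crcr(old_val, dequeue_dma, 1));
        return 0;
}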
866 static void xhci_clear_command_ring(struct xhci_hcd *xhci) in xhci_clear_command_ring() argument
871 ring = xhci->cmd_ring; in xhci_clear_command_ring()
901 xhci_set_cmd_ring_deq(xhci); in xhci_clear_command_ring()
909 * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
913 static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, in xhci_disable_hub_port_wake() argument
921 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_hub_port_wake()
938 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", in xhci_disable_hub_port_wake()
942 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_hub_port_wake()
945 static bool xhci_pending_portevent(struct xhci_hcd *xhci) in xhci_pending_portevent() argument
952 status = readl(&xhci->op_regs->status); in xhci_pending_portevent()
958 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. in xhci_pending_portevent()
961 port_index = xhci->usb2_rhub.num_ports; in xhci_pending_portevent()
962 ports = xhci->usb2_rhub.ports; in xhci_pending_portevent()
969 port_index = xhci->usb3_rhub.num_ports; in xhci_pending_portevent()
970 ports = xhci->usb3_rhub.ports; in xhci_pending_portevent()
986 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) in xhci_suspend() argument
990 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_suspend()
998 xhci->shared_hcd->state != HC_STATE_SUSPENDED) in xhci_suspend()
1002 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); in xhci_suspend()
1003 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); in xhci_suspend()
1008 xhci_dbc_suspend(xhci); in xhci_suspend()
1011 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", in xhci_suspend()
1015 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_suspend()
1016 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_suspend()
1018 if (xhci->quirks & XHCI_SUSPEND_DELAY) in xhci_suspend()
1021 spin_lock_irq(&xhci->lock); in xhci_suspend()
1023 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_suspend()
1028 command = readl(&xhci->op_regs->command); in xhci_suspend()
1030 writel(command, &xhci->op_regs->command); in xhci_suspend()
1033 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; in xhci_suspend()
1035 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1037 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); in xhci_suspend()
1038 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1041 xhci_clear_command_ring(xhci); in xhci_suspend()
1044 xhci_save_registers(xhci); in xhci_suspend()
1047 command = readl(&xhci->op_regs->command); in xhci_suspend()
1049 writel(command, &xhci->op_regs->command); in xhci_suspend()
1050 xhci->broken_suspend = 0; in xhci_suspend()
1051 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1059 * if SRE and HCE bits are not set (as per xhci in xhci_suspend()
1062 res = readl(&xhci->op_regs->status); in xhci_suspend()
1063 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && in xhci_suspend()
1066 xhci->broken_suspend = 1; in xhci_suspend()
1068 xhci_warn(xhci, "WARN: xHC save state timeout\n"); in xhci_suspend()
1069 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1073 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1076 * Deleting Compliance Mode Recovery Timer because the xHCI Host in xhci_suspend()
1079 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_suspend()
1080 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_suspend()
1081 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_suspend()
1082 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_suspend()
1089 xhci_msix_sync_irqs(xhci); in xhci_suspend()
1101 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) in xhci_resume() argument
1104 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_resume()
1118 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) || in xhci_resume()
1119 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange)) in xhci_resume()
1123 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_resume()
1125 spin_lock_irq(&xhci->lock); in xhci_resume()
1127 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) in xhci_resume()
1135 retval = xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1138 xhci_warn(xhci, "Controller not ready at resume %d\n", in xhci_resume()
1140 spin_unlock_irq(&xhci->lock); in xhci_resume()
1144 xhci_restore_registers(xhci); in xhci_resume()
1146 xhci_set_cmd_ring_deq(xhci); in xhci_resume()
1149 command = readl(&xhci->op_regs->command); in xhci_resume()
1151 writel(command, &xhci->op_regs->command); in xhci_resume()
1154 * restore, so setting the timeout to 100 ms. xHCI specification in xhci_resume()
1157 if (xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1159 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); in xhci_resume()
1160 spin_unlock_irq(&xhci->lock); in xhci_resume()
1165 temp = readl(&xhci->op_regs->status); in xhci_resume()
1170 if (!xhci->broken_suspend) in xhci_resume()
1171 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); in xhci_resume()
1175 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_resume()
1176 !(xhci_all_ports_seen_u0(xhci))) { in xhci_resume()
1177 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_resume()
1178 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_resume()
1183 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); in xhci_resume()
1184 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); in xhci_resume()
1186 xhci_dbg(xhci, "Stop HCD\n"); in xhci_resume()
1187 xhci_halt(xhci); in xhci_resume()
1188 xhci_zero_64b_regs(xhci); in xhci_resume()
1189 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); in xhci_resume()
1190 spin_unlock_irq(&xhci->lock); in xhci_resume()
1193 xhci_cleanup_msix(xhci); in xhci_resume()
1195 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); in xhci_resume()
1196 temp = readl(&xhci->op_regs->status); in xhci_resume()
1197 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_resume()
1198 temp = readl(&xhci->ir_set->irq_pending); in xhci_resume()
1199 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_resume()
1201 xhci_dbg(xhci, "cleaning up memory\n"); in xhci_resume()
1202 xhci_mem_cleanup(xhci); in xhci_resume()
1203 xhci_debugfs_exit(xhci); in xhci_resume()
1204 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", in xhci_resume()
1205 readl(&xhci->op_regs->status)); in xhci_resume()
1214 secondary_hcd = xhci->shared_hcd; in xhci_resume()
1216 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); in xhci_resume()
1222 xhci_dbg(xhci, "Start the primary HCD\n"); in xhci_resume()
1225 xhci_dbg(xhci, "Start the secondary HCD\n"); in xhci_resume()
1229 xhci->shared_hcd->state = HC_STATE_SUSPENDED; in xhci_resume()
1234 command = readl(&xhci->op_regs->command); in xhci_resume()
1236 writel(command, &xhci->op_regs->command); in xhci_resume()
1237 xhci_handshake(&xhci->op_regs->status, STS_HALT, in xhci_resume()
1249 spin_unlock_irq(&xhci->lock); in xhci_resume()
1251 xhci_dbc_resume(xhci); in xhci_resume()
1260 pending_portevent = xhci_pending_portevent(xhci); in xhci_resume()
1263 pending_portevent = xhci_pending_portevent(xhci); in xhci_resume()
1267 usb_hcd_resume_root_hub(xhci->shared_hcd); in xhci_resume()
1277 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) in xhci_resume()
1278 compliance_mode_recovery_timer_init(xhci); in xhci_resume()
1280 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_resume()
1284 xhci_dbg(xhci, "%s: starting usb%d port polling.\n", in xhci_resume()
1286 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_resume()
1287 usb_hcd_poll_rh_status(xhci->shared_hcd); in xhci_resume()
1336 * address from the XHCI endpoint index.
1371 struct xhci_hcd *xhci; in xhci_check_args() local
1375 pr_debug("xHCI %s called with invalid args\n", func); in xhci_check_args()
1379 pr_debug("xHCI %s called for root hub\n", func); in xhci_check_args()
1383 xhci = hcd_to_xhci(hcd); in xhci_check_args()
1385 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { in xhci_check_args()
1386 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", in xhci_check_args()
1391 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_args()
1393 xhci_dbg(xhci, "xHCI %s called with udev and " in xhci_check_args()
1399 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_check_args()
1405 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1415 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_check_maxpacket() argument
1426 out_ctx = xhci->devs[slot_id]->out_ctx; in xhci_check_maxpacket()
1427 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1431 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1433 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1436 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1437 "Max packet size in xHCI HW = %d", in xhci_check_maxpacket()
1439 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1447 command = xhci_alloc_command(xhci, true, mem_flags); in xhci_check_maxpacket()
1451 command->in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_check_maxpacket()
1454 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_maxpacket()
1460 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_check_maxpacket()
1461 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_check_maxpacket()
1463 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1471 ret = xhci_configure_endpoint(xhci, urb->dev, command, in xhci_check_maxpacket()
1491 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_urb_enqueue() local
1508 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_urb_enqueue()
1512 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); in xhci_urb_enqueue()
1515 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { in xhci_urb_enqueue()
1516 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); in xhci_urb_enqueue()
1520 if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) { in xhci_urb_enqueue()
1521 xhci_dbg(xhci, "skip urb for usb offload\n"); in xhci_urb_enqueue()
1550 ret = xhci_check_maxpacket(xhci, slot_id, in xhci_urb_enqueue()
1560 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1562 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_enqueue()
1563 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", in xhci_urb_enqueue()
1569 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", in xhci_urb_enqueue()
1575 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); in xhci_urb_enqueue()
1583 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1587 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1591 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1595 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1604 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1626 * endpoint command, as noted in the xHCI 0.95 errata.
1644 struct xhci_hcd *xhci; in xhci_urb_dequeue() local
1653 xhci = hcd_to_xhci(hcd); in xhci_urb_dequeue()
1654 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_dequeue()
1664 vdev = xhci->devs[urb->dev->slot_id]; in xhci_urb_dequeue()
1671 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_urb_dequeue()
1676 temp = readl(&xhci->op_regs->status); in xhci_urb_dequeue()
1677 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_dequeue()
1678 xhci_hc_died(xhci); in xhci_urb_dequeue()
1688 xhci_err(xhci, "Canceled URB td not found on endpoint ring"); in xhci_urb_dequeue()
1697 if (xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_urb_dequeue()
1698 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1714 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1737 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_urb_dequeue()
1746 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, in xhci_urb_dequeue()
1748 xhci_ring_cmd_db(xhci); in xhci_urb_dequeue()
1751 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1758 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1774 * the xhci->devs[slot_id] structure.
1779 struct xhci_hcd *xhci; in xhci_drop_endpoint() local
1791 xhci = hcd_to_xhci(hcd); in xhci_drop_endpoint()
1792 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_drop_endpoint()
1795 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_drop_endpoint()
1798 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", in xhci_drop_endpoint()
1803 in_ctx = xhci->devs[udev->slot_id]->in_ctx; in xhci_drop_endpoint()
1804 out_ctx = xhci->devs[udev->slot_id]->out_ctx; in xhci_drop_endpoint()
1807 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_drop_endpoint()
1813 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_drop_endpoint()
1821 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) in xhci_drop_endpoint()
1822 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", in xhci_drop_endpoint()
1833 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); in xhci_drop_endpoint()
1835 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); in xhci_drop_endpoint()
1837 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_drop_endpoint()
1857 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1862 struct xhci_hcd *xhci; in xhci_add_endpoint() local
1878 xhci = hcd_to_xhci(hcd); in xhci_add_endpoint()
1879 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_add_endpoint()
1888 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", in xhci_add_endpoint()
1893 virt_dev = xhci->devs[udev->slot_id]; in xhci_add_endpoint()
1897 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_add_endpoint()
1908 xhci_warn(xhci, "Trying to add endpoint 0x%x " in xhci_add_endpoint()
1918 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", in xhci_add_endpoint()
1928 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { in xhci_add_endpoint()
1948 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); in xhci_add_endpoint()
1951 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_add_endpoint()
1960 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) in xhci_zero_in_ctx() argument
1969 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_zero_in_ctx()
1981 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_zero_in_ctx()
1986 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); in xhci_zero_in_ctx()
1994 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, in xhci_configure_endpoint_result() argument
2002 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); in xhci_configure_endpoint_result()
2031 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint_result()
2036 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_configure_endpoint_result()
2044 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, in xhci_evaluate_context_result() argument
2052 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); in xhci_evaluate_context_result()
2057 "WARN: xHCI driver setup invalid evaluate context command.\n"); in xhci_evaluate_context_result()
2081 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_evaluate_context_result()
2086 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_evaluate_context_result()
2094 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, in xhci_count_num_new_endpoints() argument
2115 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, in xhci_count_num_dropped_endpoints() argument
2139 * Must be called with xhci->lock held.
2141 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, in xhci_reserve_host_resources() argument
2146 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_reserve_host_resources()
2147 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { in xhci_reserve_host_resources()
2148 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2151 xhci->num_active_eps, added_eps, in xhci_reserve_host_resources()
2152 xhci->limit_active_eps); in xhci_reserve_host_resources()
2155 xhci->num_active_eps += added_eps; in xhci_reserve_host_resources()
2156 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2158 xhci->num_active_eps); in xhci_reserve_host_resources()
2166 * Must be called with xhci->lock held.
2168 static void xhci_free_host_resources(struct xhci_hcd *xhci, in xhci_free_host_resources() argument
2173 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_free_host_resources()
2174 xhci->num_active_eps -= num_failed_eps; in xhci_free_host_resources()
2175 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_host_resources()
2178 xhci->num_active_eps); in xhci_free_host_resources()
2185 * Must be called with xhci->lock held.
2187 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, in xhci_finish_resource_reservation() argument
2192 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); in xhci_finish_resource_reservation()
2193 xhci->num_active_eps -= num_dropped_eps; in xhci_finish_resource_reservation()
2195 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_finish_resource_reservation()
2198 xhci->num_active_eps); in xhci_finish_resource_reservation()
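For hosts with the XHCI_EP_LIMIT_QUIRK, xhci_reserve_host_resources(), xhci_free_host_resources() and xhci_finish_resource_reservation() keep a running num_active_eps count against limit_active_eps: endpoints are reserved up-front under xhci->lock, and the count is rolled back if the configure endpoint command fails. A standalone sketch of that reserve/rollback accounting (struct and field names are made up for illustration):

#include <stdio.h>

struct ep_accounting {
        unsigned int num_active_eps;
        unsigned int limit_active_eps;
};

/* Try to reserve added_eps endpoints; fail without side effects if over the limit. */
static int reserve_eps(struct ep_accounting *acct, unsigned int added_eps)
{
        if (acct->num_active_eps + added_eps > acct->limit_active_eps)
                return -1;
        acct->num_active_eps += added_eps;
        return 0;
}

/* Roll back a reservation, e.g. when the configure endpoint command fails. */
static void release_eps(struct ep_accounting *acct, unsigned int eps)
{
        acct->num_active_eps -= eps;
}

int main(void)
{
        struct ep_accounting acct = { .num_active_eps = 60, .limit_active_eps = 64 };

        if (reserve_eps(&acct, 3) == 0)
                printf("reserved, now %u active\n", acct.num_active_eps);

        if (reserve_eps(&acct, 5) != 0)
                printf("over limit (%u active, limit %u)\n",
                       acct.num_active_eps, acct.limit_active_eps);

        release_eps(&acct, 3);  /* pretend the configure command failed */
        printf("rolled back to %u active\n", acct.num_active_eps);
        return 0;
}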
2234 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, in xhci_check_tt_bw_table() argument
2242 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; in xhci_check_tt_bw_table()
2264 static int xhci_check_ss_bw(struct xhci_hcd *xhci, in xhci_check_ss_bw() argument
2321 static int xhci_check_bw_table(struct xhci_hcd *xhci, in xhci_check_bw_table() argument
2337 return xhci_check_ss_bw(xhci, virt_dev); in xhci_check_bw_table()
2358 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2361 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_check_bw_table()
2362 xhci_warn(xhci, "Not enough bandwidth on HS bus for " in xhci_check_bw_table()
2366 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2371 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2454 xhci_warn(xhci, "Not enough bandwidth. " in xhci_check_bw_table()
2477 xhci->rh_bw[port_index].num_active_tts; in xhci_check_bw_table()
2480 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2489 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", in xhci_check_bw_table()
2524 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, in xhci_drop_ep_from_interval_table() argument
2539 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= in xhci_drop_ep_from_interval_table()
2542 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= in xhci_drop_ep_from_interval_table()
2588 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, in xhci_add_ep_to_interval_table() argument
2604 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += in xhci_add_ep_to_interval_table()
2607 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += in xhci_add_ep_to_interval_table()
2662 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, in xhci_update_tt_active_eps() argument
2670 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; in xhci_update_tt_active_eps()
2682 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, in xhci_reserve_bandwidth() argument
2696 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_reserve_bandwidth()
2712 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2720 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); in xhci_reserve_bandwidth()
2724 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2732 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_reserve_bandwidth()
2736 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_reserve_bandwidth()
2749 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2761 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2775 static int xhci_configure_endpoint(struct xhci_hcd *xhci, in xhci_configure_endpoint() argument
2789 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2791 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_configure_endpoint()
2792 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2796 virt_dev = xhci->devs[udev->slot_id]; in xhci_configure_endpoint()
2800 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2801 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_configure_endpoint()
2806 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && in xhci_configure_endpoint()
2807 xhci_reserve_host_resources(xhci, ctrl_ctx)) { in xhci_configure_endpoint()
2808 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2809 xhci_warn(xhci, "Not enough host resources, " in xhci_configure_endpoint()
2811 xhci->num_active_eps); in xhci_configure_endpoint()
2814 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && in xhci_configure_endpoint()
2815 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { in xhci_configure_endpoint()
2816 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2817 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2818 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2819 xhci_warn(xhci, "Not enough bandwidth\n"); in xhci_configure_endpoint()
2823 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_configure_endpoint()
2829 ret = xhci_queue_configure_endpoint(xhci, command, in xhci_configure_endpoint()
2833 ret = xhci_queue_evaluate_context(xhci, command, in xhci_configure_endpoint()
2837 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2838 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2839 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2840 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint()
2844 xhci_ring_cmd_db(xhci); in xhci_configure_endpoint()
2845 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2851 ret = xhci_configure_endpoint_result(xhci, udev, in xhci_configure_endpoint()
2854 ret = xhci_evaluate_context_result(xhci, udev, in xhci_configure_endpoint()
2857 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_configure_endpoint()
2858 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2863 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2865 xhci_finish_resource_reservation(xhci, ctrl_ctx); in xhci_configure_endpoint()
2866 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2871 ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); in xhci_configure_endpoint()
2873 xhci_warn(xhci, "sync device context failed, ret=%d", ret); in xhci_configure_endpoint()
2879 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, in xhci_check_bw_drop_ep_streams() argument
2885 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", in xhci_check_bw_drop_ep_streams()
2887 xhci_free_stream_info(xhci, ep->stream_info); in xhci_check_bw_drop_ep_streams()
2900 * else should be touching the xhci->devs[slot_id] structure, so we
2901 * don't need to take the xhci->lock for manipulating that.
2907 struct xhci_hcd *xhci; in xhci_check_bandwidth() local
2916 xhci = hcd_to_xhci(hcd); in xhci_check_bandwidth()
2917 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_check_bandwidth()
2918 (xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_check_bandwidth()
2921 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_check_bandwidth()
2922 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_bandwidth()
2924 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_check_bandwidth()
2933 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_bandwidth()
2949 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_check_bandwidth()
2961 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_check_bandwidth()
2971 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2972 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2975 xhci_zero_in_ctx(xhci, virt_dev); in xhci_check_bandwidth()
2987 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2989 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2992 xhci_debugfs_create_endpoint(xhci, virt_dev, i); in xhci_check_bandwidth()
3004 struct xhci_hcd *xhci; in xhci_reset_bandwidth() local
3011 xhci = hcd_to_xhci(hcd); in xhci_reset_bandwidth()
3013 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_reset_bandwidth()
3014 virt_dev = xhci->devs[udev->slot_id]; in xhci_reset_bandwidth()
3018 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_reset_bandwidth()
3019 if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i)) in xhci_reset_bandwidth()
3020 xhci_vendor_free_transfer_ring(xhci, virt_dev, i); in xhci_reset_bandwidth()
3022 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); in xhci_reset_bandwidth()
3027 xhci_zero_in_ctx(xhci, virt_dev); in xhci_reset_bandwidth()
3031 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_config_ep() argument
3039 xhci_slot_copy(xhci, in_ctx, out_ctx); in xhci_setup_input_ctx_for_config_ep()
3046 struct xhci_hcd *xhci; in xhci_endpoint_disable() local
3053 xhci = hcd_to_xhci(hcd); in xhci_endpoint_disable()
3055 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_disable()
3061 vdev = xhci->devs[udev->slot_id]; in xhci_endpoint_disable()
3072 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_disable()
3078 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", in xhci_endpoint_disable()
3082 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_disable()
3094 * endpoint. Refer to the additional note in xhci specification section 4.6.8. in xhci_endpoint_reset()
3100 struct xhci_hcd *xhci; in xhci_endpoint_reset() local
3111 xhci = hcd_to_xhci(hcd); in xhci_endpoint_reset()
3115 vdev = xhci->devs[udev->slot_id]; in xhci_endpoint_reset()
3130 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3133 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3136 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3147 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3151 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3155 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3168 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3169 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3173 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, in xhci_endpoint_reset()
3176 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3177 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3178 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", in xhci_endpoint_reset()
3183 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3184 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3188 err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); in xhci_endpoint_reset()
3190 xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", in xhci_endpoint_reset()
3195 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3200 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3201 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3202 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_endpoint_reset()
3207 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, in xhci_endpoint_reset()
3209 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); in xhci_endpoint_reset()
3211 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, in xhci_endpoint_reset()
3214 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3215 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3216 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", in xhci_endpoint_reset()
3221 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3222 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3226 err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); in xhci_endpoint_reset()
3228 xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", in xhci_endpoint_reset()
3231 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3233 xhci_free_command(xhci, stop_cmd); in xhci_endpoint_reset()
3234 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3237 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3240 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, in xhci_check_streams_endpoint() argument
3250 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); in xhci_check_streams_endpoint()
3254 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" in xhci_check_streams_endpoint()
3261 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_check_streams_endpoint()
3264 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " in xhci_check_streams_endpoint()
3267 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " in xhci_check_streams_endpoint()
3271 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { in xhci_check_streams_endpoint()
3272 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " in xhci_check_streams_endpoint()
3280 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, in xhci_calculate_streams_entries() argument
3290 * level page entries), but that's an optional feature for xHCI host in xhci_calculate_streams_entries()
3293 max_streams = HCC_MAX_PSA(xhci->hcc_params); in xhci_calculate_streams_entries()
3295 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", in xhci_calculate_streams_entries()
3306 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, in xhci_calculate_streams_and_bitmask() argument
3317 ret = xhci_check_streams_endpoint(xhci, udev, in xhci_calculate_streams_and_bitmask()
3324 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", in xhci_calculate_streams_and_bitmask()
3338 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, in xhci_calculate_no_streams_bitmask() argument
3349 if (!xhci->devs[slot_id]) in xhci_calculate_no_streams_bitmask()
3354 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_calculate_no_streams_bitmask()
3357 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3366 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3370 xhci_warn(xhci, "WARN xhci_free_streams() called " in xhci_calculate_no_streams_bitmask()
3400 struct xhci_hcd *xhci; in xhci_alloc_streams() local
3414 * stream 0 that is reserved for xHCI usage. in xhci_alloc_streams()
3417 xhci = hcd_to_xhci(hcd); in xhci_alloc_streams()
3418 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", in xhci_alloc_streams()
3422 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || in xhci_alloc_streams()
3423 HCC_MAX_PSA(xhci->hcc_params) < 4) { in xhci_alloc_streams()
3424 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); in xhci_alloc_streams()
3428 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_alloc_streams()
3434 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_alloc_streams()
3436 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3444 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3445 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, in xhci_alloc_streams()
3448 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3449 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3453 xhci_warn(xhci, "WARN: endpoints can't handle " in xhci_alloc_streams()
3455 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3456 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3459 vdev = xhci->devs[udev->slot_id]; in xhci_alloc_streams()
3467 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3473 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); in xhci_alloc_streams()
3474 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", in xhci_alloc_streams()
3480 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, in xhci_alloc_streams()
3496 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); in xhci_alloc_streams()
3498 xhci_endpoint_copy(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3500 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, in xhci_alloc_streams()
3506 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3511 ret = xhci_configure_endpoint(xhci, udev, config_cmd, in xhci_alloc_streams()
3521 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3525 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", in xhci_alloc_streams()
3529 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3530 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3534 xhci_debugfs_create_stream_files(xhci, vdev, ep_index); in xhci_alloc_streams()
3543 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_alloc_streams()
3550 xhci_endpoint_zero(xhci, vdev, eps[i]); in xhci_alloc_streams()
3552 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3567 struct xhci_hcd *xhci; in xhci_free_streams() local
3575 xhci = hcd_to_xhci(hcd); in xhci_free_streams()
3576 vdev = xhci->devs[udev->slot_id]; in xhci_free_streams()
3579 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3580 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, in xhci_free_streams()
3583 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3595 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3596 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_free_streams()
3605 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_free_streams()
3606 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= in xhci_free_streams()
3609 xhci_endpoint_copy(xhci, command->in_ctx, in xhci_free_streams()
3614 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, in xhci_free_streams()
3617 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3622 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_free_streams()
3631 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3634 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_free_streams()
3642 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3652 * Must be called with xhci->lock held.
3654 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, in xhci_free_device_endpoint_resources() argument
3667 xhci->num_active_eps -= num_dropped_eps; in xhci_free_device_endpoint_resources()
3669 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_device_endpoint_resources()
3673 xhci->num_active_eps); in xhci_free_device_endpoint_resources()
3699 struct xhci_hcd *xhci; in xhci_discover_or_reset_device() local
3709 xhci = hcd_to_xhci(hcd); in xhci_discover_or_reset_device()
3711 virt_dev = xhci->devs[slot_id]; in xhci_discover_or_reset_device()
3713 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3730 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3741 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_discover_or_reset_device()
3748 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); in xhci_discover_or_reset_device()
3755 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); in xhci_discover_or_reset_device()
3757 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); in xhci_discover_or_reset_device()
3762 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3764 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); in xhci_discover_or_reset_device()
3766 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_discover_or_reset_device()
3767 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3770 xhci_ring_cmd_db(xhci); in xhci_discover_or_reset_device()
3771 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3776 ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); in xhci_discover_or_reset_device()
3778 xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d", in xhci_discover_or_reset_device()
3791 xhci_warn(xhci, "Timeout waiting for reset device command\n"); in xhci_discover_or_reset_device()
3796 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", in xhci_discover_or_reset_device()
3798 xhci_get_slot_state(xhci, virt_dev->out_ctx)); in xhci_discover_or_reset_device()
3799 xhci_dbg(xhci, "Not freeing device rings.\n"); in xhci_discover_or_reset_device()
3804 xhci_dbg(xhci, "Successful reset device command.\n"); in xhci_discover_or_reset_device()
3807 if (xhci_is_vendor_info_code(xhci, ret)) in xhci_discover_or_reset_device()
3809 xhci_warn(xhci, "Unknown completion code %u for " in xhci_discover_or_reset_device()
3816 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_discover_or_reset_device()
3817 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3819 xhci_free_device_endpoint_resources(xhci, virt_dev, false); in xhci_discover_or_reset_device()
3820 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3828 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", in xhci_discover_or_reset_device()
3830 xhci_free_stream_info(xhci, ep->stream_info); in xhci_discover_or_reset_device()
3836 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3837 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3840 xhci_drop_ep_from_interval_table(xhci, in xhci_discover_or_reset_device()
3849 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_discover_or_reset_device()
3854 xhci_free_command(xhci, reset_device_cmd); in xhci_discover_or_reset_device()
3865 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_free_dev() local
3875 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_free_dev()
3885 virt_dev = xhci->devs[udev->slot_id]; in xhci_free_dev()
3886 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_free_dev()
3895 xhci_disable_slot(xhci, udev->slot_id); in xhci_free_dev()
3896 xhci_free_virt_device(xhci, udev->slot_id); in xhci_free_dev()
3899 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) in xhci_disable_slot() argument
3906 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_disable_slot()
3910 xhci_debugfs_remove_slot(xhci, slot_id); in xhci_disable_slot()
3912 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_slot()
3914 state = readl(&xhci->op_regs->status); in xhci_disable_slot()
3915 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || in xhci_disable_slot()
3916 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_disable_slot()
3917 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3922 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_disable_slot()
3925 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3929 xhci_ring_cmd_db(xhci); in xhci_disable_slot()
3930 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3935 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", in xhci_disable_slot()
3938 xhci_free_command(xhci, command); in xhci_disable_slot()
3947 * Must be called with xhci->lock held.
3949 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) in xhci_reserve_host_control_ep_resources() argument
3951 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { in xhci_reserve_host_control_ep_resources()
3952 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3955 xhci->num_active_eps, xhci->limit_active_eps); in xhci_reserve_host_control_ep_resources()
3958 xhci->num_active_eps += 1; in xhci_reserve_host_control_ep_resources()
3959 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3961 xhci->num_active_eps); in xhci_reserve_host_control_ep_resources()
3972 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_alloc_dev() local
3979 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_alloc_dev()
3983 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3984 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); in xhci_alloc_dev()
3986 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3987 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_alloc_dev()
3988 xhci_free_command(xhci, command); in xhci_alloc_dev()
3991 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3992 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3998 xhci_err(xhci, "Error while assigning device slot ID\n"); in xhci_alloc_dev()
3999 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", in xhci_alloc_dev()
4001 readl(&xhci->cap_regs->hcs_params1))); in xhci_alloc_dev()
4002 xhci_free_command(xhci, command); in xhci_alloc_dev()
4006 xhci_free_command(xhci, command); in xhci_alloc_dev()
4008 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_alloc_dev()
4009 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
4010 ret = xhci_reserve_host_control_ep_resources(xhci); in xhci_alloc_dev()
4012 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
4013 xhci_warn(xhci, "Not enough host resources, " in xhci_alloc_dev()
4015 xhci->num_active_eps); in xhci_alloc_dev()
4018 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
4024 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { in xhci_alloc_dev()
4025 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); in xhci_alloc_dev()
4029 ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); in xhci_alloc_dev()
4031 xhci_warn(xhci, "%s: Failed to sync device context, err=%d", in xhci_alloc_dev()
4036 vdev = xhci->devs[slot_id]; in xhci_alloc_dev()
4037 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_alloc_dev()
4042 xhci_debugfs_create_slot(xhci, slot_id); in xhci_alloc_dev()
4048 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_alloc_dev()
4056 xhci_disable_slot(xhci, udev->slot_id); in xhci_alloc_dev()
4057 xhci_free_virt_device(xhci, udev->slot_id); in xhci_alloc_dev()
4073 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_setup_device() local
4079 mutex_lock(&xhci->mutex); in xhci_setup_device()
4081 if (xhci->xhc_state) { /* dying, removing or halted */ in xhci_setup_device()
4087 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4093 virt_dev = xhci->devs[udev->slot_id]; in xhci_setup_device()
4101 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", in xhci_setup_device()
4106 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
4112 xhci_dbg(xhci, "Slot already in default state\n"); in xhci_setup_device()
4117 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_setup_device()
4125 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_setup_device()
4128 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_device()
4135 * virt_device reallocation after a resume with an xHCI power loss, in xhci_setup_device()
4139 xhci_setup_addressable_virt_dev(xhci, udev); in xhci_setup_device()
4142 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); in xhci_setup_device()
4146 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
4150 spin_lock_irqsave(&xhci->lock, flags); in xhci_setup_device()
4152 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, in xhci_setup_device()
4155 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4156 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4160 xhci_ring_cmd_db(xhci); in xhci_setup_device()
4161 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4166 ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); in xhci_setup_device()
4168 xhci_warn(xhci, "%s: Failed to sync device context, err=%d", in xhci_setup_device()
4180 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); in xhci_setup_device()
4185 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", in xhci_setup_device()
4192 mutex_unlock(&xhci->mutex); in xhci_setup_device()
4193 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_setup_device()
4194 xhci_free_virt_device(xhci, udev->slot_id); in xhci_setup_device()
4206 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4210 xhci_err(xhci, in xhci_setup_device()
4213 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); in xhci_setup_device()
4219 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_setup_device()
4220 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4222 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4225 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], in xhci_setup_device()
4227 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); in xhci_setup_device()
4228 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4231 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
4237 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, in xhci_setup_device()
4242 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
4245 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4249 mutex_unlock(&xhci->mutex); in xhci_setup_device()
4286 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, in xhci_change_max_exit_latency() argument
4296 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4298 virt_dev = xhci->devs[udev->slot_id]; in xhci_change_max_exit_latency()
4303 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated in xhci_change_max_exit_latency()
4307 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4312 command = xhci->lpm_command; in xhci_change_max_exit_latency()
4315 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4316 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_change_max_exit_latency()
4321 ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); in xhci_change_max_exit_latency()
4323 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4324 xhci_warn(xhci, "%s: Failed to sync device context, err=%d", in xhci_change_max_exit_latency()
4329 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); in xhci_change_max_exit_latency()
4330 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4333 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_change_max_exit_latency()
4338 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_change_max_exit_latency()
4342 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_change_max_exit_latency()
4346 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4348 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4353 struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci) in xhci_vendor_get_ops() argument
4355 return xhci->vendor_ops; in xhci_vendor_get_ops()
4359 int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id) in xhci_vendor_sync_dev_ctx() argument
4361 struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); in xhci_vendor_sync_dev_ctx()
4364 return ops->sync_dev_ctx(xhci, slot_id); in xhci_vendor_sync_dev_ctx()
4368 bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb) in xhci_vendor_usb_offload_skip_urb() argument
4370 struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); in xhci_vendor_usb_offload_skip_urb()
4373 return ops->usb_offload_skip_urb(xhci, urb); in xhci_vendor_usb_offload_skip_urb()
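The xhci_vendor_get_ops(), xhci_vendor_sync_dev_ctx() and xhci_vendor_usb_offload_skip_urb() helpers above dispatch into an optional vendor ops table (an offload hook carried in some Android kernels). A hypothetical vendor implementation is sketched below purely to show how the two callbacks visible above would be filled in; the callback names in struct xhci_vendor_ops are inferred from the accessors, and everything else (function names, behaviour, registration) is an assumption:

    /* Hypothetical vendor hooks -- names and behaviour are illustrative only. */
    static int my_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
    {
    	/* Would copy the offload engine's view of the device context back so
    	 * the core driver sees fresh state for xhci->devs[slot_id]. */
    	return 0;
    }

    static bool my_vendor_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
    {
    	/* Would return true for endpoints handled entirely by the offload
    	 * hardware, so the core driver does not queue those URBs itself. */
    	return false;
    }

    static struct xhci_vendor_ops my_vendor_ops = {
    	.sync_dev_ctx		= my_vendor_sync_dev_ctx,
    	.usb_offload_skip_urb	= my_vendor_skip_urb,
    };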
4384 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, in xhci_calculate_hird_besl() argument
4391 u2del = HCS_U2_LATENCY(xhci->hcs_params3); in xhci_calculate_hird_besl()
4428 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ in xhci_calculate_usb2_hw_lpm_params()
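The comment above (from xhci_calculate_usb2_hw_lpm_params()) notes that the L1 timeout field is programmed in 256 us units. A worked example of that conversion, with the timeout value chosen purely for illustration:

    /* Illustrative only: a device L1 timeout of 5120 us expressed in the
     * 256 us units the field expects (xHCI 1.0, section 5.4.11.2). */
    unsigned int timeout_us = 5120;
    unsigned int l1_field   = timeout_us / 256;	/* = 20 -> value programmed */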
4443 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_set_usb2_hardware_lpm() local
4452 if (xhci->quirks & XHCI_HW_LPM_DISABLE) in xhci_set_usb2_hardware_lpm()
4455 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || in xhci_set_usb2_hardware_lpm()
4466 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4468 ports = xhci->usb2_rhub.ports; in xhci_set_usb2_hardware_lpm()
4474 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", in xhci_set_usb2_hardware_lpm()
4482 * systems. See XHCI_DEFAULT_BESL definition in xhci.h in xhci_set_usb2_hardware_lpm()
4492 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4494 /* USB 3.0 code dedicates one xhci->lpm_command->in_ctx in xhci_set_usb2_hardware_lpm()
4502 ret = xhci_change_max_exit_latency(xhci, udev, in xhci_set_usb2_hardware_lpm()
4508 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4515 hird = xhci_calculate_hird_besl(xhci, udev); in xhci_set_usb2_hardware_lpm()
4532 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4534 xhci_change_max_exit_latency(xhci, udev, 0); in xhci_set_usb2_hardware_lpm()
4543 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4551 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, in xhci_check_usb2_port_capability() argument
4557 for (i = 0; i < xhci->num_ext_caps; i++) { in xhci_check_usb2_port_capability()
4558 if (xhci->ext_caps[i] & capability) { in xhci_check_usb2_port_capability()
4560 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; in xhci_check_usb2_port_capability()
4561 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); in xhci_check_usb2_port_capability()
4572 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_device() local
4583 if (xhci->hw_lpm_support == 1 && in xhci_update_device()
4585 xhci, portnum, XHCI_HLC)) { in xhci_update_device()
4589 if (xhci_check_usb2_port_capability(xhci, portnum, in xhci_update_device()
4694 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, in xhci_calculate_u1_timeout() argument
4708 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u1_timeout()
4758 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, in xhci_calculate_u2_timeout() argument
4772 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u2_timeout()
4789 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_call_host_update_timeout_for_endpoint() argument
4796 return xhci_calculate_u1_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4798 return xhci_calculate_u2_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4803 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_update_timeout_for_endpoint() argument
4811 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_endpoint()
4828 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, in xhci_update_timeout_for_interface() argument
4837 if (xhci_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_interface()
4869 static int xhci_check_tier_policy(struct xhci_hcd *xhci, in xhci_check_tier_policy() argument
4873 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_check_tier_policy()
4887 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_calculate_lpm_timeout() local
4903 if (xhci_check_tier_policy(xhci, udev, state) < 0) in xhci_calculate_lpm_timeout()
4909 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, in xhci_calculate_lpm_timeout()
4943 if (xhci_update_timeout_for_interface(xhci, udev, in xhci_calculate_lpm_timeout()
4987 /* xHCI host controller max exit latency field is only 16 bits wide. */ in calculate_max_exit_latency()
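Because the max exit latency field is only 16 bits wide (comment above, in calculate_max_exit_latency()), a computed latency larger than U16_MAX cannot be programmed into the slot context. A minimal sketch of the range check this implies; the variable names are illustrative and the clamp-vs-reject choice is an assumption, not the driver's actual policy:

    /* Sketch: the computed exit latency must fit in the 16-bit MEL field. */
    u32 mel_us = max(u1_mel_us, u2_mel_us);

    if (WARN_ON_ONCE(mel_us > U16_MAX))
    	mel_us = U16_MAX;		/* clamp rather than program garbage */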
5000 struct xhci_hcd *xhci; in xhci_enable_usb3_lpm_timeout() local
5005 xhci = hcd_to_xhci(hcd); in xhci_enable_usb3_lpm_timeout()
5010 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_enable_usb3_lpm_timeout()
5011 !xhci->devs[udev->slot_id]) in xhci_enable_usb3_lpm_timeout()
5022 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_enable_usb3_lpm_timeout()
5031 struct xhci_hcd *xhci; in xhci_disable_usb3_lpm_timeout() local
5034 xhci = hcd_to_xhci(hcd); in xhci_disable_usb3_lpm_timeout()
5035 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_disable_usb3_lpm_timeout()
5036 !xhci->devs[udev->slot_id]) in xhci_disable_usb3_lpm_timeout()
5040 return xhci_change_max_exit_latency(xhci, udev, mel); in xhci_disable_usb3_lpm_timeout()
5076 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_hub_device() local
5089 vdev = xhci->devs[hdev->slot_id]; in xhci_update_hub_device()
5091 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); in xhci_update_hub_device()
5095 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_update_hub_device()
5101 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_update_hub_device()
5103 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5107 spin_lock_irqsave(&xhci->lock, flags); in xhci_update_hub_device()
5109 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { in xhci_update_hub_device()
5110 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); in xhci_update_hub_device()
5111 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5112 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
5116 ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id); in xhci_update_hub_device()
5118 xhci_warn(xhci, "%s: Failed to sync device context, err=%d", in xhci_update_hub_device()
5120 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5121 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
5125 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); in xhci_update_hub_device()
5127 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); in xhci_update_hub_device()
5131 * but it may already be set to 1 when setting up an xHCI virtual in xhci_update_hub_device()
5139 if (xhci->hci_version > 0x95) { in xhci_update_hub_device()
5140 xhci_dbg(xhci, "xHCI version %x needs hub " in xhci_update_hub_device()
5142 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
5148 * xHCI 1.0: this field shall be 0 if the device is not a in xhci_update_hub_device()
5154 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) in xhci_update_hub_device()
5158 xhci_dbg(xhci, "xHCI version %x doesn't need hub " in xhci_update_hub_device()
5160 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
5163 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
5165 xhci_dbg(xhci, "Set up %s for hub device.\n", in xhci_update_hub_device()
5166 (xhci->hci_version > 0x95) ? in xhci_update_hub_device()
5172 if (xhci->hci_version > 0x95) in xhci_update_hub_device()
5173 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
5176 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
5179 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5185 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_get_frame() local
5187 return readl(&xhci->run_regs->microframe_index) >> 3; in xhci_get_frame()
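xhci_get_frame() above reads the running MFINDEX register and shifts right by 3: the register counts 125 us microframes, and eight microframes make up one 1 ms USB frame. A small illustration of the same conversion with an assumed register value:

    /* Illustrative only: MFINDEX counts 125 us microframes; >> 3 yields the
     * 1 ms frame number (8 microframes per frame). */
    u32 mfindex  = 0x1234;		/* assumed raw MFINDEX value */
    u32 frame_nr = mfindex >> 3;	/* == mfindex / 8 */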
5192 struct xhci_hcd *xhci; in xhci_gen_setup() local
5207 /* XHCI controllers don't stop the ep queue on short packets :| */ in xhci_gen_setup()
5210 xhci = hcd_to_xhci(hcd); in xhci_gen_setup()
5213 xhci->main_hcd = hcd; in xhci_gen_setup()
5214 xhci->usb2_rhub.hcd = hcd; in xhci_gen_setup()
5216 * The xHCI driver will register the USB 3.0 roothub. in xhci_gen_setup()
5221 * USB 2.0 roothub under xHCI has an integrated TT, in xhci_gen_setup()
5228 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts in xhci_gen_setup()
5231 * This was later clarified in xHCI 1.2. in xhci_gen_setup()
5236 if (xhci->usb3_rhub.min_rev == 0x1) in xhci_gen_setup()
5239 minor_rev = xhci->usb3_rhub.min_rev / 0x10; in xhci_gen_setup()
5253 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", in xhci_gen_setup()
5257 xhci->usb3_rhub.hcd = hcd; in xhci_gen_setup()
5258 /* xHCI private pointer was set in xhci_pci_probe for the second in xhci_gen_setup()
5264 mutex_init(&xhci->mutex); in xhci_gen_setup()
5265 xhci->cap_regs = hcd->regs; in xhci_gen_setup()
5266 xhci->op_regs = hcd->regs + in xhci_gen_setup()
5267 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); in xhci_gen_setup()
5268 xhci->run_regs = hcd->regs + in xhci_gen_setup()
5269 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); in xhci_gen_setup()
5271 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); in xhci_gen_setup()
5272 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); in xhci_gen_setup()
5273 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); in xhci_gen_setup()
5274 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); in xhci_gen_setup()
5275 xhci->hci_version = HC_VERSION(xhci->hcc_params); in xhci_gen_setup()
5276 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); in xhci_gen_setup()
5277 if (xhci->hci_version > 0x100) in xhci_gen_setup()
5278 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); in xhci_gen_setup()
5280 xhci->quirks |= quirks; in xhci_gen_setup()
5282 get_quirks(dev, xhci); in xhci_gen_setup()
5284 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious in xhci_gen_setup()
5288 if (xhci->hci_version > 0x96) in xhci_gen_setup()
5289 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; in xhci_gen_setup()
5292 retval = xhci_halt(xhci); in xhci_gen_setup()
5296 xhci_zero_64b_regs(xhci); in xhci_gen_setup()
5298 xhci_dbg(xhci, "Resetting HCD\n"); in xhci_gen_setup()
5300 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); in xhci_gen_setup()
5303 xhci_dbg(xhci, "Reset complete\n"); in xhci_gen_setup()
5306 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) in xhci_gen_setup()
5309 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, in xhci_gen_setup()
5312 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) in xhci_gen_setup()
5313 xhci->hcc_params &= ~BIT(0); in xhci_gen_setup()
5317 if (HCC_64BIT_ADDR(xhci->hcc_params) && in xhci_gen_setup()
5319 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); in xhci_gen_setup()
5329 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); in xhci_gen_setup()
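The quirk handling above clears the AC64 bit of hcc_params when XHCI_NO_64BIT_SUPPORT is set, and the driver then selects a 64-bit or 32-bit DMA mask accordingly. A condensed sketch of that standard idiom, with the surrounding error handling of xhci_gen_setup() omitted:

    /* Sketch: pick the DMA mask based on the (possibly quirk-cleared) AC64 bit. */
    if (HCC_64BIT_ADDR(xhci->hcc_params) &&
        !dma_set_mask(dev, DMA_BIT_MASK(64))) {
    	xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
    	dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
    } else {
    	/* Fall back to 32-bit DMA if 64-bit addressing is unavailable. */
    	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
    		return -ENODEV;
    	xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
    }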
5333 xhci_dbg(xhci, "Calling HCD init\n"); in xhci_gen_setup()
5338 xhci_dbg(xhci, "Called HCD init\n"); in xhci_gen_setup()
5340 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", in xhci_gen_setup()
5341 xhci->hcc_params, xhci->hci_version, xhci->quirks); in xhci_gen_setup()
5350 struct xhci_hcd *xhci; in xhci_clear_tt_buffer_complete() local
5356 xhci = hcd_to_xhci(hcd); in xhci_clear_tt_buffer_complete()
5358 spin_lock_irqsave(&xhci->lock, flags); in xhci_clear_tt_buffer_complete()
5363 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; in xhci_clear_tt_buffer_complete()
5364 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_clear_tt_buffer_complete()
5365 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_clear_tt_buffer_complete()
5369 .description = "xhci-hcd",
5370 .product_desc = "xHCI Host Controller",