Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dae3be1..6c17e3f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
@@ -52,7 +53,6 @@
return false;
}
-/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
@@ -69,18 +69,16 @@
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = readl(ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
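+ /* poll in 1 us steps until (reg & mask) == done, the hc is removed, or usec elapses */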
+ ret = readl_poll_timeout_atomic(ptr, result,
+ (result & mask) == done ||
+ result == U32_MAX,
+ 1, usec);
+ if (result == U32_MAX) /* card removed */
+ return -ENODEV;
+
+ return ret;
}
/*
@@ -169,7 +167,7 @@
{
u32 command;
u32 state;
- int ret, i;
+ int ret;
state = readl(&xhci->op_regs->status);
@@ -215,11 +213,12 @@
ret = xhci_handshake(&xhci->op_regs->status,
STS_CNR, 0, 10 * 1000 * 1000);
- for (i = 0; i < 2; i++) {
- xhci->bus_state[i].port_c_suspend = 0;
- xhci->bus_state[i].suspended_ports = 0;
- xhci->bus_state[i].resuming_ports = 0;
- }
+ xhci->usb2_rhub.bus_state.port_c_suspend = 0;
+ xhci->usb2_rhub.bus_state.suspended_ports = 0;
+ xhci->usb2_rhub.bus_state.resuming_ports = 0;
+ xhci->usb3_rhub.bus_state.port_c_suspend = 0;
+ xhci->usb3_rhub.bus_state.suspended_ports = 0;
+ xhci->usb3_rhub.bus_state.resuming_ports = 0;
return ret;
}
@@ -244,7 +243,7 @@
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -892,7 +891,7 @@
struct xhci_port **ports;
int port_index;
unsigned long flags;
- u32 t1, t2;
+ u32 t1, t2, portsc;
spin_lock_irqsave(&xhci->lock, flags);
@@ -901,10 +900,15 @@
ports = xhci->usb3_rhub.ports;
while (port_index--) {
t1 = readl(ports[port_index]->addr);
+ portsc = t1;
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
- if (t1 != t2)
+ if (t1 != t2) {
writel(t2, ports[port_index]->addr);
+ xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+ xhci->usb3_rhub.hcd->self.busnum,
+ port_index + 1, portsc, t2);
+ }
}
/* disable usb2 ports Wake bits */
@@ -912,12 +916,16 @@
ports = xhci->usb2_rhub.ports;
while (port_index--) {
t1 = readl(ports[port_index]->addr);
+ portsc = t1;
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
- if (t1 != t2)
+ if (t1 != t2) {
writel(t2, ports[port_index]->addr);
+ xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+ xhci->usb2_rhub.hcd->self.busnum,
+ port_index + 1, portsc, t2);
+ }
}
-
spin_unlock_irqrestore(&xhci->lock, flags);
}
@@ -1024,7 +1032,7 @@
writel(command, &xhci->op_regs->command);
xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
- STS_SAVE, 0, 10 * 1000)) {
+ STS_SAVE, 0, 20 * 1000)) {
/*
* AMD SNPS xHC 3.0 occasionally does not clear the
* SSS bit of USBSTS and when driver tries to poll
@@ -1087,9 +1095,9 @@
/* Wait a bit if either of the roothubs need to settle from the
* transition into bus suspend.
*/
- if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
- time_before(jiffies,
- xhci->bus_state[1].next_statechange))
+
+ if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
+ time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
msleep(100);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -1100,6 +1108,18 @@
hibernated = true;
if (!hibernated) {
+ /*
+ * Some controllers might lose power during suspend, so wait
+ * for controller not ready bit to clear, just as in xHC init.
+ */
+ retval = xhci_handshake(&xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
+ if (retval) {
+ xhci_warn(xhci, "Controller not ready at resume %d\n",
+ retval);
+ spin_unlock_irq(&xhci->lock);
+ return retval;
+ }
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
@@ -1237,6 +1257,21 @@
/*-------------------------------------------------------------------------*/
+/*
+ * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
+ * we'll copy the actual data into the TRB address register. This is limited to
+ * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
+ * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
+ */
+static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ if (xhci_urb_suitable_for_idt(urb))
+ return 0;
+
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+}
+
/**
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
@@ -1443,6 +1478,10 @@
xhci_dbg(xhci, "urb submitted during PCI suspend\n");
return -ESHUTDOWN;
}
+ if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
+ xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
+ return -ENODEV;
+ }
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
@@ -1454,8 +1493,7 @@
else
num_tds = 1;
- urb_priv = kzalloc(sizeof(struct urb_priv) +
- num_tds * sizeof(struct xhci_td), mem_flags);
+ urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
if (!urb_priv)
return -ENOMEM;
@@ -1783,6 +1821,7 @@
struct xhci_container_ctx *in_ctx;
unsigned int ep_index;
struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
u32 new_add_flags, new_drop_flags;
struct xhci_virt_device *virt_dev;
@@ -1873,6 +1912,9 @@
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ trace_xhci_add_endpoint(ep_ctx);
+
xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
@@ -2747,6 +2789,8 @@
}
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+
+ trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
trace_xhci_configure_endpoint(slot_ctx);
if (!ctx_change)
@@ -3027,6 +3071,48 @@
}
}
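+/*
+ * Called by usbcore when an endpoint is being disabled; wait for any pending
+ * TT clearing to finish, then clear the usb_device pointer stored in
+ * host_ep->hcpriv.
+ */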
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
+ struct usb_device *udev;
+ unsigned long flags;
+ unsigned int ep_index;
+
+ xhci = hcd_to_xhci(hcd);
+rescan:
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ udev = (struct usb_device *)host_ep->hcpriv;
+ if (!udev || !udev->slot_id)
+ goto done;
+
+ vdev = xhci->devs[udev->slot_id];
+ if (!vdev)
+ goto done;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = &vdev->eps[ep_index];
+ if (!ep)
+ goto done;
+
+ /* wait for hub_tt_work to finish clearing hub TT */
+ if (ep->ep_state & EP_CLEARING_TT) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ }
+
+ if (ep->ep_state)
+ xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
+ ep->ep_state);
+done:
+ host_ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
/*
* Called after usb core issues a clear halt control message.
* The host side of the halt should already be cleared by a reset endpoint
@@ -3051,14 +3137,25 @@
unsigned int ep_index;
unsigned long flags;
u32 ep_flag;
+ int err;
xhci = hcd_to_xhci(hcd);
if (!host_ep->hcpriv)
return;
udev = (struct usb_device *) host_ep->hcpriv;
vdev = xhci->devs[udev->slot_id];
+
+ /*
+ * vdev may be lost due to xHC restore error and re-initialization
+ * during S3/S4 resume. A new vdev will be allocated later by
+ * xhci_discover_or_reset_device()
+ */
+ if (!udev->slot_id || !vdev)
+ return;
ep_index = xhci_get_endpoint_index(&host_ep->desc);
ep = &vdev->eps[ep_index];
+ if (!ep)
+ return;
/* Bail out if toggle is already being cleared by an endpoint reset */
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
@@ -3100,7 +3197,17 @@
xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
- xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+ err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+ ep_index, 0);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3114,8 +3221,16 @@
ctrl_ctx, ep_flag, ep_flag);
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
- xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+ err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
udev->slot_id, false);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3726,6 +3841,7 @@
}
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+ virt_dev->flags = 0;
ret = 0;
command_cleanup:
@@ -3771,7 +3887,6 @@
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
virt_dev->udev = NULL;
ret = xhci_disable_slot(xhci, udev->slot_id);
if (ret)
@@ -3789,6 +3904,8 @@
if (!command)
return -ENOMEM;
+ xhci_debugfs_remove_slot(xhci, slot_id);
+
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
@@ -4012,6 +4129,7 @@
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
+ trace_xhci_address_ctrl_ctx(ctrl_ctx);
spin_lock_irqsave(&xhci->lock, flags);
trace_xhci_setup_device(virt_dev);
ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
@@ -4096,6 +4214,8 @@
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Internal device address = %d",
@@ -4289,7 +4409,6 @@
pm_addr = ports[port_num]->addr + PORTPMSC;
pm_val = readl(pm_addr);
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
- field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
@@ -4301,6 +4420,7 @@
* default one which works with mixed HIRD and BESL
* systems. See XHCI_DEFAULT_BESL definition in xhci.h
*/
+ field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
if ((field & USB_BESL_SUPPORT) &&
(field & USB_BESL_BASELINE_VALID))
hird = USB_GET_BESL_BASELINE(field);
@@ -4388,8 +4508,7 @@
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int portnum = udev->portnum - 1;
- if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
- !udev->lpm_capable)
+ if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
return 0;
/* we only support lpm for non-hub device connected to root hub yet */
@@ -4628,12 +4747,12 @@
alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
desc, state, timeout);
- /* If we found we can't enable hub-initiated LPM, or
+ /* If we found we can't enable hub-initiated LPM, and
* the U1 or U2 exit latency was too high to allow
- * device-initiated LPM as well, just stop searching.
+ * device-initiated LPM as well, then we will disable LPM
+ * for this device, so stop searching any further.
*/
- if (alt_timeout == USB3_LPM_DISABLED ||
- alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+ if (alt_timeout == USB3_LPM_DISABLED) {
*timeout = alt_timeout;
return -E2BIG;
}
@@ -4744,10 +4863,12 @@
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
if (driver && driver->disable_hub_initiated_lpm) {
- dev_dbg(&udev->dev, "Hub-initiated %s disabled "
- "at request of driver %s\n",
- state_name, driver->name);
- return xhci_get_timeout_no_hub_lpm(udev, state);
+ dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+ state_name, driver->name);
+ timeout = xhci_get_timeout_no_hub_lpm(udev,
+ state);
+ if (timeout == USB3_LPM_DISABLED)
+ return timeout;
}
}
@@ -5031,17 +5152,34 @@
hcd->has_tt = 1;
} else {
/*
- * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
- * minor revision instead of sbrn
+ * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+ * should return 0x31 for sbrn, or that the minor revision
+ * is a two-digit BCD containing minor and sub-minor numbers.
+ * This was later clarified in xHCI 1.2.
+ *
+ * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+ * minor revision set to 0x1 instead of 0x10.
*/
- minor_rev = xhci->usb3_rhub.min_rev;
- if (minor_rev) {
+ if (xhci->usb3_rhub.min_rev == 0x1)
+ minor_rev = 1;
+ else
+ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+
+ switch (minor_rev) {
+ case 2:
+ hcd->speed = HCD_USB32;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ hcd->self.root_hub->rx_lanes = 2;
+ hcd->self.root_hub->tx_lanes = 2;
+ break;
+ case 1:
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ break;
}
- xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
+ xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
minor_rev,
- minor_rev ? "Enhanced" : "");
+ minor_rev ? "Enhanced " : "");
xhci->usb3_rhub.hcd = hcd;
/* xHCI private pointer was set in xhci_pci_probe for the second
@@ -5133,6 +5271,27 @@
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
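+/*
+ * Called by the hub driver once it has finished clearing the Transaction
+ * Translator buffer for an endpoint behind a high-speed hub; clear
+ * EP_CLEARING_TT and ring the doorbell so the endpoint's rings restart.
+ */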
+static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci;
+ struct usb_device *udev;
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned long flags;
+
+ xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ udev = (struct usb_device *)ep->hcpriv;
+ slot_id = udev->slot_id;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
+ xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5142,7 +5301,7 @@
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
@@ -5155,6 +5314,7 @@
/*
* managing i/o requests and associated device resources
*/
+ .map_urb_for_dma = xhci_map_urb_for_dma,
.urb_enqueue = xhci_urb_enqueue,
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
@@ -5163,6 +5323,7 @@
.free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
+ .endpoint_disable = xhci_endpoint_disable,
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
@@ -5193,6 +5354,7 @@
.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
.find_raw_port_number = xhci_find_raw_port_number,
+ .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
};
void xhci_init_driver(struct hc_driver *drv,