Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index fd9adca..7fc058f 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,17 +1,34 @@
 # SPDX-License-Identifier: GPL-2.0-only
-menuconfig THUNDERBOLT
-	tristate "Thunderbolt support"
+menuconfig USB4
+	tristate "Unified support for USB4 and Thunderbolt"
 	depends on PCI
-	depends on X86 || COMPILE_TEST
 	select APPLE_PROPERTIES if EFI_STUB && X86
 	select CRC32
 	select CRYPTO
 	select CRYPTO_HASH
 	select NVMEM
 	help
-	  Thunderbolt Controller driver. This driver is required if you
-	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
-	  with Intel Falcon Ridge or newer.
+	  USB4 and Thunderbolt driver. USB4 is the public specification
+	  based on the Thunderbolt 3 protocol. This driver is required if
+	  you want to hotplug Thunderbolt and USB4 compliant devices on
+	  Apple hardware or on PCs with Intel Falcon Ridge or newer.
 
 	  To compile this driver as a module, choose M here. The module will be
 	  called thunderbolt.
+
+if USB4
+
+config USB4_DEBUGFS_WRITE
+	bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
+	help
+	  Enables writing to device configuration registers through
+	  debugfs interface.
+
+	  Only enable this if you know what you are doing! Never enable
+	  this for production systems or distro kernels.
+
+config USB4_KUNIT_TEST
+	bool "KUnit tests"
+	depends on KUNIT=y
+
+endif # USB4
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 001187c..5715373 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+obj-${CONFIG_USB4} := thunderbolt.o
 thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
+thunderbolt-objs += nvm.o retimer.o quirks.o
+
+thunderbolt-${CONFIG_ACPI} += acpi.o
+thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
+thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
new file mode 100644
index 0000000..6355fdf
--- /dev/null
+++ b/drivers/thunderbolt/acpi.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI support
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
+
+#include "tb.h"
+
+static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
+				    void **return_value)
+{
+	struct fwnode_reference_args args;
+	struct fwnode_handle *fwnode;
+	struct tb_nhi *nhi = data;
+	struct acpi_device *adev;
+	struct pci_dev *pdev;
+	struct device *dev;
+	int ret;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	fwnode = acpi_fwnode_handle(adev);
+	ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface",
+						 NULL, 0, 0, &args);
+	if (ret)
+		return AE_OK;
+
+	/* It needs to reference this NHI */
+	if (nhi->pdev->dev.fwnode != args.fwnode)
+		goto out_put;
+
+	/*
+	 * Try to find the physical device by walking upwards in the hierarchy.
+	 * We need to do this because the xHCI driver might not yet be
+	 * bound so the USB3 SuperSpeed ports are not yet created.
+	 */
+	dev = acpi_get_first_physical_node(adev);
+	while (!dev) {
+		adev = adev->parent;
+		if (!adev)
+			break;
+		dev = acpi_get_first_physical_node(adev);
+	}
+
+	if (!dev)
+		goto out_put;
+
+	/*
+	 * Check that the device is PCIe. This is because USB3
+	 * SuperSpeed ports have this property too, but they are not
+	 * power managed themselves; the xHCI and the SuperSpeed hub
+	 * are, so we create the link from the xHCI instead.
+	 */
+	while (dev && !dev_is_pci(dev))
+		dev = dev->parent;
+
+	if (!dev)
+		goto out_put;
+
+	/*
+	 * Check that this actually matches the type of device we
+	 * expect. It should either be xHCI or PCIe root/downstream
+	 * port.
+	 */
+	pdev = to_pci_dev(dev);
+	if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
+	    (pci_is_pcie(pdev) &&
+		(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+		 pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+		const struct device_link *link;
+
+		/*
+		 * Make them both active first to make sure the NHI does
+		 * not runtime suspend before the consumer. The
+		 * pm_runtime_put() below then allows the consumer to
+		 * runtime suspend again (which then allows NHI runtime
+		 * suspend too now that the device link is established).
+		 */
+		pm_runtime_get_sync(&pdev->dev);
+
+		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_RPM_ACTIVE |
+				       DL_FLAG_PM_RUNTIME);
+		if (link) {
+			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+				dev_name(&pdev->dev));
+		} else {
+			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+				 dev_name(&pdev->dev));
+		}
+
+		pm_runtime_put(&pdev->dev);
+	}
+
+out_put:
+	fwnode_handle_put(args.fwnode);
+	return AE_OK;
+}
+
+/**
+ * tb_acpi_add_links() - Add device links based on ACPI description
+ * @nhi: Pointer to NHI
+ *
+ * Goes over the ACPI namespace finding tunneled ports that reference
+ * the @nhi ACPI node. For each reference a device link is added. The link
+ * is automatically removed by the driver core.
+ */
+void tb_acpi_add_links(struct tb_nhi *nhi)
+{
+	acpi_status status;
+
+	if (!has_acpi_companion(&nhi->pdev->dev))
+		return;
+
+	/*
+	 * Find all devices that have a usb4-host-interface property
+	 * that references this NHI.
+	 */
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
+				     tb_acpi_add_link, NULL, nhi, NULL);
+	if (ACPI_FAILURE(status))
+		dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
+}
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8bf8e03..6f571e9 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -15,14 +15,6 @@
 #define VSE_CAP_OFFSET_MAX	0xffff
 #define TMU_ACCESS_EN		BIT(20)
 
-struct tb_cap_any {
-	union {
-		struct tb_cap_basic basic;
-		struct tb_cap_extended_short extended_short;
-		struct tb_cap_extended_long extended_long;
-	};
-} __packed;
-
 static int tb_port_enable_tmu(struct tb_port *port, bool enable)
 {
 	struct tb_switch *sw = port->sw;
@@ -33,9 +25,9 @@
 	 * Legacy devices need to have TMU access enabled before port
 	 * space can be fully accessed.
 	 */
-	if (tb_switch_is_lr(sw))
+	if (tb_switch_is_light_ridge(sw))
 		offset = 0x26;
-	else if (tb_switch_is_er(sw))
+	else if (tb_switch_is_eagle_ridge(sw))
 		offset = 0x2a;
 	else
 		return 0;
@@ -60,30 +52,57 @@
 	 * reading stale data on next read perform one dummy read after
 	 * port capabilities are walked.
 	 */
-	if (tb_switch_is_lr(port->sw)) {
+	if (tb_switch_is_light_ridge(port->sw)) {
 		u32 dummy;
 
 		tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
 	}
 }
 
+/**
+ * tb_port_next_cap() - Return next capability in the linked list
+ * @port: Port to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in port config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_port_next_cap(struct tb_port *port, unsigned int offset)
+{
+	struct tb_cap_any header;
+	int ret;
+
+	if (!offset)
+		return port->config.first_cap_offset;
+
+	ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+	if (ret)
+		return ret;
+
+	return header.basic.next;
+}
+
 static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
 {
-	u32 offset = 1;
+	int offset = 0;
 
 	do {
 		struct tb_cap_any header;
 		int ret;
 
+		offset = tb_port_next_cap(port, offset);
+		if (offset < 0)
+			return offset;
+
 		ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
 		if (ret)
 			return ret;
 
 		if (header.basic.cap == cap)
 			return offset;
-
-		offset = header.basic.next;
-	} while (offset);
+	} while (offset > 0);
 
 	return -ENOENT;
 }
@@ -113,23 +132,78 @@
 	return ret;
 }
 
-static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+/**
+ * tb_switch_next_cap() - Return next capability in the linked list
+ * @sw: Switch to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in router config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
 {
-	int offset = sw->config.first_cap_offset;
+	struct tb_cap_any header;
+	int ret;
 
-	while (offset > 0 && offset < CAP_OFFSET_MAX) {
+	if (!offset)
+		return sw->config.first_cap_offset;
+
+	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+	if (ret)
+		return ret;
+
+	switch (header.basic.cap) {
+	case TB_SWITCH_CAP_TMU:
+		ret = header.basic.next;
+		break;
+
+	case TB_SWITCH_CAP_VSE:
+		if (!header.extended_short.length)
+			ret = header.extended_long.next;
+		else
+			ret = header.extended_short.next;
+		break;
+
+	default:
+		tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
+			  header.basic.cap, offset);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
+}
+
+/**
+ * tb_switch_find_cap() - Find switch capability
+ * @sw: Switch to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+{
+	int offset = 0;
+
+	do {
 		struct tb_cap_any header;
 		int ret;
 
+		offset = tb_switch_next_cap(sw, offset);
+		if (offset < 0)
+			return offset;
+
 		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
 		if (ret)
 			return ret;
 
 		if (header.basic.cap == cap)
 			return offset;
-
-		offset = header.basic.next;
-	}
+	} while (offset);
 
 	return -ENOENT;
 }
@@ -146,37 +220,24 @@
  */
 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
 {
-	struct tb_cap_any header;
-	int offset;
+	int offset = 0;
 
-	offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
-	if (offset < 0)
-		return offset;
-
-	while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+	do {
+		struct tb_cap_any header;
 		int ret;
 
-		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+		offset = tb_switch_next_cap(sw, offset);
+		if (offset < 0)
+			return offset;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
 		if (ret)
 			return ret;
 
-		/*
-		 * Extended vendor specific capabilities come in two
-		 * flavors: short and long. The latter is used when
-		 * offset is over 0xff.
-		 */
-		if (offset >= CAP_OFFSET_MAX) {
-			if (header.extended_long.vsec_id == vsec)
-				return offset;
-			offset = header.extended_long.next;
-		} else {
-			if (header.extended_short.vsec_id == vsec)
-				return offset;
-			if (!header.extended_short.length)
-				return -ENOENT;
-			offset = header.extended_short.next;
-		}
-	}
+		if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
+		    header.extended_short.vsec_id == vsec)
+			return offset;
+	} while (offset);
 
 	return -ENOENT;
 }
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8..9894b8f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -219,6 +219,7 @@
 static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
 {
 	struct cfg_error_pkg *pkg = response->buffer;
+	struct tb_ctl *ctl = response->ctl;
 	struct tb_cfg_result res = { 0 };
 	res.response_route = tb_cfg_get_route(&pkg->header);
 	res.response_port = 0;
@@ -227,9 +228,13 @@
 	if (res.err)
 		return res;
 
-	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
-	WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1);
-	WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1);
+	if (pkg->zero1)
+		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
+	if (pkg->zero2)
+		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
+	if (pkg->zero3)
+		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
+
 	res.err = 1;
 	res.tb_error = pkg->error;
 	res.response_port = pkg->port;
@@ -266,9 +271,8 @@
 		 * Invalid cfg_space/offset/length combination in
 		 * cfg_read/cfg_write.
 		 */
-		tb_ctl_WARN(ctl,
-			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
-			res->response_route, res->response_port);
+		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
+			   res->response_route, res->response_port);
 		return;
 	case TB_CFG_ERROR_NO_SUCH_PORT:
 		/*
@@ -283,6 +287,10 @@
 		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
 			res->response_route, res->response_port);
 		return;
+	case TB_CFG_ERROR_LOCK:
+		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
+			    res->response_route, res->response_port);
+		return;
 	default:
 		/* 5,6,7,9 and 11 are also valid error codes */
 		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
@@ -453,7 +461,7 @@
 				   "RX: checksum mismatch, dropping packet\n");
 			goto rx;
 		}
-		/* Fall through */
+		fallthrough;
 	case TB_CFG_PKG_ICM_EVENT:
 		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
 			goto rx;
@@ -708,19 +716,26 @@
 /* public interface, commands */
 
 /**
- * tb_cfg_error() - send error packet
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: Ack hot plug or unplug
  *
- * Return: Returns 0 on success or an error code on failure.
+ * Call this in response to a hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
  */
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
-		 enum tb_cfg_error error)
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
 {
 	struct cfg_error_pkg pkg = {
 		.header = tb_cfg_make_header(route),
 		.port = port,
-		.error = error,
+		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+			     : TB_CFG_ERROR_PG_HOT_PLUG,
 	};
-	tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
+	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+		   unplug ? "un" : "", route, port);
 	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
 }
 
@@ -944,6 +959,9 @@
 		return -ENODEV;
 
 	tb_cfg_print_error(ctl, res);
+
+	if (res->tb_error == TB_CFG_ERROR_LOCK)
+		return -EACCES;
 	return -EIO;
 }
 
@@ -962,8 +980,8 @@
 		return tb_cfg_get_error(ctl, space, &res);
 
 	case -ETIMEDOUT:
-		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
-			    space, offset);
+		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+			    route, space, offset);
 		break;
 
 	default:
@@ -988,8 +1006,8 @@
 		return tb_cfg_get_error(ctl, space, &res);
 
 	case -ETIMEDOUT:
-		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
-			    space, offset);
+		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+			    route, space, offset);
 		break;
 
 	default:
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2f1a1e1..97cb03b 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -123,8 +123,7 @@
 	return header;
 }
 
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
-		 enum tb_cfg_error error);
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
 				  int timeout_msec);
 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
new file mode 100644
index 0000000..ed65d2b
--- /dev/null
+++ b/drivers/thunderbolt/debugfs.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Gil Fine <gil.fine@intel.com>
+ *	    Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "tb.h"
+
+#define PORT_CAP_PCIE_LEN	1
+#define PORT_CAP_POWER_LEN	2
+#define PORT_CAP_LANE_LEN	3
+#define PORT_CAP_USB3_LEN	5
+#define PORT_CAP_DP_LEN		8
+#define PORT_CAP_TMU_LEN	8
+#define PORT_CAP_BASIC_LEN	9
+#define PORT_CAP_USB4_LEN	20
+
+#define SWITCH_CAP_TMU_LEN	26
+#define SWITCH_CAP_BASIC_LEN	27
+
+#define PATH_LEN		2
+
+#define COUNTER_SET_LEN		3
+
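+/*
+ * Generate <name>_open() and <name>_fops for a seq_file based debugfs
+ * attribute; __write is NULL for read-only attributes.
+ */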
+#define DEBUGFS_ATTR(__space, __write)					\
+static int __space ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __space ## _show, inode->i_private);	\
+}									\
+									\
+static const struct file_operations __space ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __space ## _open,					\
+	.release = single_release,					\
+	.read  = seq_read,						\
+	.write = __write,						\
+	.llseek = seq_lseek,						\
+}
+
+#define DEBUGFS_ATTR_RO(__space)					\
+	DEBUGFS_ATTR(__space, NULL)
+
+#define DEBUGFS_ATTR_RW(__space)					\
+	DEBUGFS_ATTR(__space, __space ## _write)
+
+static struct dentry *tb_debugfs_root;
+
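+/*
+ * Copy at most one page of user data into a zeroed kernel page and
+ * update @count to the number of bytes copied. The caller frees the
+ * page with free_page().
+ */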
+static void *validate_and_copy_from_user(const void __user *user_buf,
+					 size_t *count)
+{
+	size_t nbytes;
+	void *buf;
+
+	if (!*count)
+		return ERR_PTR(-EINVAL);
+
+	if (!access_ok(user_buf, *count))
+		return ERR_PTR(-EFAULT);
+
+	buf = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	nbytes = min_t(size_t, *count, PAGE_SIZE);
+	if (copy_from_user(buf, user_buf, nbytes)) {
+		free_page((unsigned long)buf);
+		return ERR_PTR(-EFAULT);
+	}
+
+	*count = nbytes;
+	return buf;
+}
+
+static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
+		       int long_fmt_len)
+{
+	char *token;
+	u32 v[5];
+	int ret;
+
+	token = strsep(line, "\n");
+	if (!token)
+		return false;
+
+	/*
+	 * For Adapter/Router configuration space:
+	 * Short format is: offset value\n
+	 *		    v[0]   v[1]
+	 * Long format as produced from the read side:
+	 * offset relative_offset cap_id vs_cap_id value\n
+	 * v[0]   v[1]            v[2]   v[3]      v[4]
+	 *
+	 * For Counter configuration space:
+	 * Short format is: offset\n
+	 *		    v[0]
+	 * Long format as produced from the read side:
+	 * offset relative_offset counter_id value\n
+	 * v[0]   v[1]            v[2]       v[3]
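+	 *
+	 * For example, writing "0x2 0x42\n" in the short adapter/router
+	 * format sets dword 0x2 to the value 0x42.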
+	 */
+	ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
+	/* In case of Counters, clear counter, "val" content is NA */
+	if (ret == short_fmt_len) {
+		*offs = v[0];
+		*val = v[short_fmt_len - 1];
+		return true;
+	} else if (ret == long_fmt_len) {
+		*offs = v[0];
+		*val = v[long_fmt_len - 1];
+		return true;
+	}
+
+	return false;
+}
+
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
+			  const char __user *user_buf, size_t count,
+			  loff_t *ppos)
+{
+	struct tb *tb = sw->tb;
+	char *line, *buf;
+	u32 val, offset;
+	int ret = 0;
+
+	buf = validate_and_copy_from_user(user_buf, &count);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* The user made hardware changes behind the driver's back */
+	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+	line = buf;
+	while (parse_line(&line, &offset, &val, 2, 5)) {
+		if (port)
+			ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
+		else
+			ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			break;
+	}
+
+	mutex_unlock(&tb->lock);
+
+out:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+	free_page((unsigned long)buf);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct tb_port *port = s->private;
+
+	return regs_write(port->sw, port, user_buf, count, ppos);
+}
+
+static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct tb_switch *sw = s->private;
+
+	return regs_write(sw, NULL, user_buf, count, ppos);
+}
+#define DEBUGFS_MODE		0600
+#else
+#define port_regs_write		NULL
+#define switch_regs_write	NULL
+#define DEBUGFS_MODE		0400
+#endif
+
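+/* Clear all counter sets of the port by writing zeros over them */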
+static int port_clear_all_counters(struct tb_port *port)
+{
+	u32 *buf;
+	int ret;
+
+	buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
+		      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
+			    COUNTER_SET_LEN * port->config.max_counters);
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t counters_write(struct file *file, const char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct tb_port *port = s->private;
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = port->sw->tb;
+	char *buf;
+	int ret;
+
+	buf = validate_and_copy_from_user(user_buf, &count);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* If only the delimiter was written, clear all counters in one shot */
+	if (buf[0] == '\n') {
+		ret = port_clear_all_counters(port);
+	} else  {
+		char *line = buf;
+		u32 val, offset;
+
+		ret = -EINVAL;
+		while (parse_line(&line, &offset, &val, 1, 4)) {
+			ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
+					    offset, 1);
+			if (ret)
+				break;
+		}
+	}
+
+	mutex_unlock(&tb->lock);
+
+out:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+	free_page((unsigned long)buf);
+
+	return ret < 0 ? ret : count;
+}
+
+static void cap_show(struct seq_file *s, struct tb_switch *sw,
+		     struct tb_port *port, unsigned int cap, u8 cap_id,
+		     u8 vsec_id, int length)
+{
+	int ret, offset = 0;
+
+	while (length > 0) {
+		int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
+		u32 data[TB_MAX_CONFIG_RW_LENGTH];
+
+		if (port)
+			ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
+					   dwords);
+		else
+			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
+		if (ret) {
+			seq_printf(s, "0x%04x <not accessible>\n",
+				   cap + offset);
+			if (dwords > 1)
+				seq_printf(s, "0x%04x ...\n", cap + offset + 1);
+			return;
+		}
+
+		for (i = 0; i < dwords; i++) {
+			seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
+				   cap + offset + i, offset + i,
+				   cap_id, vsec_id, data[i]);
+		}
+
+		length -= dwords;
+		offset += dwords;
+	}
+}
+
+static void port_cap_show(struct tb_port *port, struct seq_file *s,
+			  unsigned int cap)
+{
+	struct tb_cap_any header;
+	u8 vsec_id = 0;
+	size_t length;
+	int ret;
+
+	ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
+	if (ret) {
+		seq_printf(s, "0x%04x <capability read failed>\n", cap);
+		return;
+	}
+
+	switch (header.basic.cap) {
+	case TB_PORT_CAP_PHY:
+		length = PORT_CAP_LANE_LEN;
+		break;
+
+	case TB_PORT_CAP_TIME1:
+		length = PORT_CAP_TMU_LEN;
+		break;
+
+	case TB_PORT_CAP_POWER:
+		length = PORT_CAP_POWER_LEN;
+		break;
+
+	case TB_PORT_CAP_ADAP:
+		if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
+			length = PORT_CAP_PCIE_LEN;
+		} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
+			length = PORT_CAP_DP_LEN;
+		} else if (tb_port_is_usb3_down(port) ||
+			   tb_port_is_usb3_up(port)) {
+			length = PORT_CAP_USB3_LEN;
+		} else {
+			seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+				   cap, header.basic.cap);
+			return;
+		}
+		break;
+
+	case TB_PORT_CAP_VSE:
+		if (!header.extended_short.length) {
+			ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
+					   cap + 1, 1);
+			if (ret) {
+				seq_printf(s, "0x%04x <capability read failed>\n",
+					   cap + 1);
+				return;
+			}
+			length = header.extended_long.length;
+			vsec_id = header.extended_short.vsec_id;
+		} else {
+			length = header.extended_short.length;
+			vsec_id = header.extended_short.vsec_id;
+			/*
+			 * Ice Lake and Tiger Lake do not implement the
+			 * full length of the capability, only the first
+			 * 32 dwords, so hard-code it here.
+			 */
+			if (!vsec_id &&
+			    (tb_switch_is_ice_lake(port->sw) ||
+			     tb_switch_is_tiger_lake(port->sw)))
+				length = 32;
+		}
+		break;
+
+	case TB_PORT_CAP_USB4:
+		length = PORT_CAP_USB4_LEN;
+		break;
+
+	default:
+		seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+			   cap, header.basic.cap);
+		return;
+	}
+
+	cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
+}
+
+static void port_caps_show(struct tb_port *port, struct seq_file *s)
+{
+	int cap;
+
+	cap = tb_port_next_cap(port, 0);
+	while (cap > 0) {
+		port_cap_show(port, s, cap);
+		cap = tb_port_next_cap(port, cap);
+	}
+}
+
+static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
+{
+	u32 data[PORT_CAP_BASIC_LEN];
+	int ret, i;
+
+	ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(data); i++)
+		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+	return 0;
+}
+
+static int port_regs_show(struct seq_file *s, void *not_used)
+{
+	struct tb_port *port = s->private;
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	int ret;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out_rpm_put;
+	}
+
+	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+	ret = port_basic_regs_show(port, s);
+	if (ret)
+		goto out_unlock;
+
+	port_caps_show(port, s);
+
+out_unlock:
+	mutex_unlock(&tb->lock);
+out_rpm_put:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
+	return ret;
+}
+DEBUGFS_ATTR_RW(port_regs);
+
+static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
+			    unsigned int cap)
+{
+	struct tb_cap_any header;
+	int ret, length;
+	u8 vsec_id = 0;
+
+	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
+	if (ret) {
+		seq_printf(s, "0x%04x <capability read failed>\n", cap);
+		return;
+	}
+
+	if (header.basic.cap == TB_SWITCH_CAP_VSE) {
+		if (!header.extended_short.length) {
+			ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
+					 cap + 1, 1);
+			if (ret) {
+				seq_printf(s, "0x%04x <capability read failed>\n",
+					   cap + 1);
+				return;
+			}
+			length = header.extended_long.length;
+		} else {
+			length = header.extended_short.length;
+		}
+		vsec_id = header.extended_short.vsec_id;
+	} else {
+		if (header.basic.cap == TB_SWITCH_CAP_TMU) {
+			length = SWITCH_CAP_TMU_LEN;
+		} else  {
+			seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
+				   cap, header.basic.cap);
+			return;
+		}
+	}
+
+	cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
+}
+
+static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
+{
+	int cap;
+
+	cap = tb_switch_next_cap(sw, 0);
+	while (cap > 0) {
+		switch_cap_show(sw, s, cap);
+		cap = tb_switch_next_cap(sw, cap);
+	}
+}
+
+static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
+{
+	u32 data[SWITCH_CAP_BASIC_LEN];
+	size_t dwords;
+	int ret, i;
+
+	/* Only USB4 has the additional registers */
+	if (tb_switch_is_usb4(sw))
+		dwords = ARRAY_SIZE(data);
+	else
+		dwords = 7;
+
+	ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+	return 0;
+}
+
+static int switch_regs_show(struct seq_file *s, void *not_used)
+{
+	struct tb_switch *sw = s->private;
+	struct tb *tb = sw->tb;
+	int ret;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out_rpm_put;
+	}
+
+	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+	ret = switch_basic_regs_show(sw, s);
+	if (ret)
+		goto out_unlock;
+
+	switch_caps_show(sw, s);
+
+out_unlock:
+	mutex_unlock(&tb->lock);
+out_rpm_put:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
+	return ret;
+}
+DEBUGFS_ATTR_RW(switch_regs);
+
+static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
+{
+	u32 data[PATH_LEN];
+	int ret, i;
+
+	ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
+			   ARRAY_SIZE(data));
+	if (ret) {
+		seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data); i++) {
+		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+			   hopid * PATH_LEN + i, i, hopid, data[i]);
+	}
+
+	return 0;
+}
+
+static int path_show(struct seq_file *s, void *not_used)
+{
+	struct tb_port *port = s->private;
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	int start, i, ret = 0;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out_rpm_put;
+	}
+
+	seq_puts(s, "# offset relative_offset in_hop_id value\n");
+
+	/* NHI and lane adapters have an entry for path 0 */
+	if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
+		ret = path_show_one(port, s, 0);
+		if (ret)
+			goto out_unlock;
+	}
+
+	start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
+
+	for (i = start; i <= port->config.max_in_hop_id; i++) {
+		ret = path_show_one(port, s, i);
+		if (ret)
+			break;
+	}
+
+out_unlock:
+	mutex_unlock(&tb->lock);
+out_rpm_put:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
+	return ret;
+}
+DEBUGFS_ATTR_RO(path);
+
+static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
+				 int counter)
+{
+	u32 data[COUNTER_SET_LEN];
+	int ret, i;
+
+	ret = tb_port_read(port, data, TB_CFG_COUNTERS,
+			   counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
+	if (ret) {
+		seq_printf(s, "0x%04x <not accessible>\n",
+			   counter * COUNTER_SET_LEN);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data); i++) {
+		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+			   counter * COUNTER_SET_LEN + i, i, counter, data[i]);
+	}
+
+	return 0;
+}
+
+static int counters_show(struct seq_file *s, void *not_used)
+{
+	struct tb_port *port = s->private;
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	int i, ret = 0;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	seq_puts(s, "# offset relative_offset counter_id value\n");
+
+	for (i = 0; i < port->config.max_counters; i++) {
+		ret = counter_set_regs_show(port, s, i);
+		if (ret)
+			break;
+	}
+
+	mutex_unlock(&tb->lock);
+
+out:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
+	return ret;
+}
+DEBUGFS_ATTR_RW(counters);
+
+/**
+ * tb_switch_debugfs_init() - Add debugfs entries for router
+ * @sw: Pointer to the router
+ *
+ * Adds debugfs directories and files for the given router.
+ */
+void tb_switch_debugfs_init(struct tb_switch *sw)
+{
+	struct dentry *debugfs_dir;
+	struct tb_port *port;
+
+	debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
+	sw->debugfs_dir = debugfs_dir;
+	debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
+			    &switch_regs_fops);
+
+	tb_switch_for_each_port(sw, port) {
+		struct dentry *debugfs_dir;
+		char dir_name[10];
+
+		if (port->disabled)
+			continue;
+		if (port->config.type == TB_TYPE_INACTIVE)
+			continue;
+
+		snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+		debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
+		debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
+				    port, &port_regs_fops);
+		debugfs_create_file("path", 0400, debugfs_dir, port,
+				    &path_fops);
+		if (port->config.counters_support)
+			debugfs_create_file("counters", 0600, debugfs_dir, port,
+					    &counters_fops);
+	}
+}
+
+/**
+ * tb_switch_debugfs_remove() - Remove all router debugfs entries
+ * @sw: Pointer to the router
+ *
+ * Removes all previously added debugfs entries under this router.
+ */
+void tb_switch_debugfs_remove(struct tb_switch *sw)
+{
+	debugfs_remove_recursive(sw->debugfs_dir);
+}
+
+void tb_debugfs_init(void)
+{
+	tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
+}
+
+void tb_debugfs_exit(void)
+{
+	debugfs_remove_recursive(tb_debugfs_root);
+}
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index b7980c8..f0de94f 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -147,10 +147,10 @@
 
 	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
 		if (!uuid_is_null(&uuids[i]))
-			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
 					&uuids[i]);
 
-		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
 			       i < tb->nboot_acl - 1 ? "," : "\n");
 	}
 
@@ -275,7 +275,7 @@
 static umode_t domain_attr_is_visible(struct kobject *kobj,
 				      struct attribute *attr, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct tb *tb = container_of(dev, struct tb, dev);
 
 	if (attr == &dev_attr_boot_acl.attr) {
@@ -455,6 +455,8 @@
 	/* This starts event processing */
 	mutex_unlock(&tb->lock);
 
+	device_init_wakeup(&tb->dev, true);
+
 	pm_runtime_no_callbacks(&tb->dev);
 	pm_runtime_set_active(&tb->dev);
 	pm_runtime_enable(&tb->dev);
@@ -544,6 +546,33 @@
 	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
 }
 
+int tb_domain_freeze_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->freeze_noirq)
+		ret = tb->cm_ops->freeze_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_thaw_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->thaw_noirq)
+		ret = tb->cm_ops->thaw_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
 void tb_domain_complete(struct tb *tb)
 {
 	if (tb->cm_ops->complete)
@@ -798,12 +827,23 @@
 {
 	int ret;
 
+	tb_test_init();
+
+	tb_debugfs_init();
 	ret = tb_xdomain_init();
 	if (ret)
-		return ret;
+		goto err_debugfs;
 	ret = bus_register(&tb_bus_type);
 	if (ret)
-		tb_xdomain_exit();
+		goto err_xdomain;
+
+	return 0;
+
+err_xdomain:
+	tb_xdomain_exit();
+err_debugfs:
+	tb_debugfs_exit();
+	tb_test_exit();
 
 	return ret;
 }
@@ -812,6 +852,8 @@
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
-	tb_switch_exit();
+	tb_nvm_exit();
 	tb_xdomain_exit();
+	tb_debugfs_exit();
+	tb_test_exit();
 }
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee51964..0c8471b 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/crc32.h>
+#include <linux/delay.h>
 #include <linux/property.h>
 #include <linux/slab.h>
 #include "tb.h"
@@ -131,12 +132,51 @@
 }
 
 /**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+	struct tb_cap_plug_events cap;
+	int res;
+
+	if (!sw->cap_plug_events) {
+		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+		return -ENODEV;
+	}
+	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+			     sizeof(cap) / 4);
+	if (res)
+		return res;
+
+	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+		tb_sw_warn(sw, "no NVM\n");
+		return -ENODEV;
+	}
+
+	if (cap.drom_offset > 0xffff) {
+		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+				cap.drom_offset);
+		return -ENXIO;
+	}
+	*offset = cap.drom_offset;
+	return 0;
+}
+
+/**
  * tb_eeprom_read_n - read count bytes from offset into val
  */
 static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
 		size_t count)
 {
+	u16 drom_offset;
 	int i, res;
+
+	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+	if (res)
+		return res;
+
+	offset += drom_offset;
+
 	res = tb_eeprom_active(sw, true);
 	if (res)
 		return res;
@@ -208,7 +248,7 @@
 
 struct tb_drom_entry_generic {
 	struct tb_drom_entry_header header;
-	u8 data[0];
+	u8 data[];
 } __packed;
 
 struct tb_drom_entry_port {
@@ -239,36 +279,6 @@
 
 
 /**
- * tb_eeprom_get_drom_offset - get drom offset within eeprom
- */
-static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
-{
-	struct tb_cap_plug_events cap;
-	int res;
-	if (!sw->cap_plug_events) {
-		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
-		return -ENOSYS;
-	}
-	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
-			     sizeof(cap) / 4);
-	if (res)
-		return res;
-
-	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
-		tb_sw_warn(sw, "no NVM\n");
-		return -ENOSYS;
-	}
-
-	if (cap.drom_offset > 0xffff) {
-		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
-				cap.drom_offset);
-		return -ENXIO;
-	}
-	*offset = cap.drom_offset;
-	return 0;
-}
-
-/**
  * tb_drom_read_uid_only - read uid directly from drom
  *
  * Does not use the cached copy in sw->drom. Used during resume to check switch
@@ -277,17 +287,11 @@
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
 {
 	u8 data[9];
-	u16 drom_offset;
 	u8 crc;
-	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
-	if (res)
-		return res;
-
-	if (drom_offset == 0)
-		return -ENODEV;
+	int res;
 
 	/* read uid */
-	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+	res = tb_eeprom_read_n(sw, 0, data, 9);
 	if (res)
 		return res;
 
@@ -386,8 +390,8 @@
 		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
 		if (pos + 1 == drom_size || pos + entry->len > drom_size
 				|| !entry->len) {
-			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
-			return -EIO;
+			tb_sw_warn(sw, "DROM buffer overrun\n");
+			return -EILSEQ;
 		}
 
 		switch (entry->type) {
@@ -484,16 +488,47 @@
 	return ret;
 }
 
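+/* Read the host router DROM via USB4 router operations */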
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+	int ret;
+
+	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+	if (ret)
+		return ret;
+
+	/* Size includes CRC8 + UID + CRC32 */
+	*size += 1 + 8 + 4;
+	sw->drom = kzalloc(*size, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+
+	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+	if (ret) {
+		kfree(sw->drom);
+		sw->drom = NULL;
+	}
+
+	return ret;
+}
+
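+/* Dispatch DROM reads to USB4 router operations or bit-banged EEPROM */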
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+			  size_t count)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_drom_read(sw, offset, val, count);
+	return tb_eeprom_read_n(sw, offset, val, count);
+}
+
 /**
  * tb_drom_read - copy drom to sw->drom and parse it
  */
 int tb_drom_read(struct tb_switch *sw)
 {
-	u16 drom_offset;
 	u16 size;
 	u32 crc;
 	struct tb_drom_header *header;
-	int res;
+	int res, retries = 1;
+
 	if (sw->drom)
 		return 0;
 
@@ -510,29 +545,26 @@
 			goto parse;
 
 		/*
-		 * The root switch contains only a dummy drom (header only,
-		 * no entries). Hardcode the configuration here.
+		 * USB4 hosts may support reading DROM through router
+		 * operations.
 		 */
-		tb_drom_read_uid_only(sw, &sw->uid);
-
-		sw->ports[1].link_nr = 0;
-		sw->ports[2].link_nr = 1;
-		sw->ports[1].dual_link_port = &sw->ports[2];
-		sw->ports[2].dual_link_port = &sw->ports[1];
-
-		sw->ports[3].link_nr = 0;
-		sw->ports[4].link_nr = 1;
-		sw->ports[3].dual_link_port = &sw->ports[4];
-		sw->ports[4].dual_link_port = &sw->ports[3];
+		if (tb_switch_is_usb4(sw)) {
+			usb4_switch_read_uid(sw, &sw->uid);
+			if (!usb4_copy_host_drom(sw, &size))
+				goto parse;
+		} else {
+			/*
+			 * The root switch contains only a dummy drom
+			 * (header only, no entries). Hardcode the
+			 * configuration here.
+			 */
+			tb_drom_read_uid_only(sw, &sw->uid);
+		}
 
 		return 0;
 	}
 
-	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
-	if (res)
-		return res;
-
-	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+	res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
 	if (res)
 		return res;
 	size &= 0x3ff;
@@ -546,7 +578,7 @@
 	sw->drom = kzalloc(size, GFP_KERNEL);
 	if (!sw->drom)
 		return -ENOMEM;
-	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+	res = tb_drom_read_n(sw, 0, sw->drom, size);
 	if (res)
 		goto err;
 
@@ -569,6 +601,7 @@
 		sw->uid = header->uid;
 	sw->vendor = header->vendor_id;
 	sw->device = header->model_id;
+	tb_check_quirks(sw);
 
 	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
 	if (crc != header->data_crc32) {
@@ -581,7 +614,17 @@
 		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
 			header->device_rom_revision);
 
-	return tb_drom_parse_entries(sw);
+	res = tb_drom_parse_entries(sw);
+	/* If the DROM parsing fails, wait a moment and retry once */
+	if (res == -EILSEQ && retries--) {
+		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
+		msleep(100);
+		res = tb_drom_read_n(sw, 0, sw->drom, size);
+		if (!res)
+			goto parse;
+	}
+
+	return res;
 err:
 	kfree(sw->drom);
 	sw->drom = NULL;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 2f932b6..82c46b2 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -11,6 +11,7 @@
 
 #include <linux/delay.h>
 #include <linux/mutex.h>
+#include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/platform_data/x86/apple.h>
@@ -43,6 +44,10 @@
 #define ICM_APPROVE_TIMEOUT		10000	/* ms */
 #define ICM_MAX_LINK			4
 
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
 /**
  * struct icm - Internal connection manager private data
  * @request_lock: Makes sure only one message is send to ICM at time
@@ -109,7 +114,7 @@
 struct ep_name_entry {
 	u8 len;
 	u8 type;
-	u8 data[0];
+	u8 data[];
 };
 
 #define EP_NAME_INTEL_VSS	0x10
@@ -147,6 +152,17 @@
 	return NULL;
 }
 
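+/* Returns true if the endpoint's Intel VSS entry advertises RTD3 support */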
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+	const struct intel_vss *vss;
+
+	vss = parse_intel_vss(ep_name, size);
+	if (vss)
+		return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+	return false;
+}
+
 static inline struct tb *icm_to_tb(struct icm *icm)
 {
 	return ((void *)icm - sizeof(struct tb));
@@ -339,6 +355,14 @@
 	}
 }
 
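+/* Check from the firmware status register whether the ICM is running */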
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+	u32 val;
+
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	return !!(val & REG_FW_STS_ICM_EN);
+}
+
 static bool icm_fr_is_supported(struct tb *tb)
 {
 	return !x86_apple_machine;
@@ -562,58 +586,42 @@
 	return 0;
 }
 
-static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
-				    const uuid_t *uuid, const u8 *ep_name,
-				    size_t ep_name_size, u8 connection_id,
-				    u8 connection_key, u8 link, u8 depth,
-				    enum tb_security_level security_level,
-				    bool authorized, bool boot)
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+				      const uuid_t *uuid)
 {
-	const struct intel_vss *vss;
+	struct tb *tb = parent_sw->tb;
 	struct tb_switch *sw;
-	int ret;
 
-	pm_runtime_get_sync(&parent_sw->dev);
-
-	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
-	if (IS_ERR(sw))
-		goto out;
+	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+	if (IS_ERR(sw)) {
+		tb_warn(tb, "failed to allocate switch at %llx\n", route);
+		return sw;
+	}
 
 	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
 	if (!sw->uuid) {
-		tb_sw_warn(sw, "cannot allocate memory for switch\n");
 		tb_switch_put(sw);
-		goto out;
+		return ERR_PTR(-ENOMEM);
 	}
-	sw->connection_id = connection_id;
-	sw->connection_key = connection_key;
-	sw->link = link;
-	sw->depth = depth;
-	sw->authorized = authorized;
-	sw->security_level = security_level;
-	sw->boot = boot;
-	init_completion(&sw->rpm_complete);
 
-	vss = parse_intel_vss(ep_name, ep_name_size);
-	if (vss)
-		sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+	init_completion(&sw->rpm_complete);
+	return sw;
+}
+
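+/* Link @sw to its parent port and register it with the driver core */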
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+	u64 route = tb_route(sw);
+	int ret;
 
 	/* Link the two switches now */
 	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
 	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
 
 	ret = tb_switch_add(sw);
-	if (ret) {
+	if (ret)
 		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
-		tb_switch_put(sw);
-		sw = ERR_PTR(ret);
-	}
 
-out:
-	pm_runtime_mark_last_busy(&parent_sw->dev);
-	pm_runtime_put_autosuspend(&parent_sw->dev);
-
-	return sw;
+	return ret;
 }
 
 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -697,11 +705,11 @@
 		(const struct icm_fr_event_device_connected *)hdr;
 	enum tb_security_level security_level;
 	struct tb_switch *sw, *parent_sw;
+	bool boot, dual_lane, speed_gen3;
 	struct icm *icm = tb_priv(tb);
 	bool authorized = false;
 	struct tb_xdomain *xd;
 	u8 link, depth;
-	bool boot;
 	u64 route;
 	int ret;
 
@@ -714,6 +722,8 @@
 	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
 			 ICM_FLAGS_SLEVEL_SHIFT;
 	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
 
 	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
 		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
@@ -811,10 +821,27 @@
 		return;
 	}
 
-	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
-		   sizeof(pkg->ep_name), pkg->connection_id,
-		   pkg->connection_key, link, depth, security_level,
-		   authorized, boot);
+	pm_runtime_get_sync(&parent_sw->dev);
+
+	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+	if (!IS_ERR(sw)) {
+		sw->connection_id = pkg->connection_id;
+		sw->connection_key = pkg->connection_key;
+		sw->link = link;
+		sw->depth = depth;
+		sw->authorized = authorized;
+		sw->security_level = security_level;
+		sw->boot = boot;
+		sw->link_speed = speed_gen3 ? 20 : 10;
+		sw->link_width = dual_lane ? 2 : 1;
+		sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+		if (add_switch(parent_sw, sw))
+			tb_switch_put(sw);
+	}
+
+	pm_runtime_mark_last_busy(&parent_sw->dev);
+	pm_runtime_put_autosuspend(&parent_sw->dev);
 
 	tb_switch_put(parent_sw);
 }
@@ -1142,10 +1169,10 @@
 {
 	const struct icm_tr_event_device_connected *pkg =
 		(const struct icm_tr_event_device_connected *)hdr;
+	bool authorized, boot, dual_lane, speed_gen3;
 	enum tb_security_level security_level;
 	struct tb_switch *sw, *parent_sw;
 	struct tb_xdomain *xd;
-	bool authorized, boot;
 	u64 route;
 
 	icm_postpone_rescan(tb);
@@ -1163,6 +1190,8 @@
 	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
 			 ICM_FLAGS_SLEVEL_SHIFT;
 	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
 
 	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
 		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
@@ -1205,11 +1234,27 @@
 		return;
 	}
 
-	sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
-			sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
-			security_level, authorized, boot);
-	if (!IS_ERR(sw) && force_rtd3)
-		sw->rpm = true;
+	pm_runtime_get_sync(&parent_sw->dev);
+
+	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+	if (!IS_ERR(sw)) {
+		sw->connection_id = pkg->connection_id;
+		sw->authorized = authorized;
+		sw->security_level = security_level;
+		sw->boot = boot;
+		sw->link_speed = speed_gen3 ? 20 : 10;
+		sw->link_width = dual_lane ? 2 : 1;
+		sw->rpm = force_rtd3;
+		if (!sw->rpm)
+			sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+						    sizeof(pkg->ep_name));
+
+		if (add_switch(parent_sw, sw))
+			tb_switch_put(sw);
+	}
+
+	pm_runtime_mark_last_busy(&parent_sw->dev);
+	pm_runtime_put_autosuspend(&parent_sw->dev);
 
 	tb_switch_put(parent_sw);
 }
@@ -1349,9 +1394,12 @@
 	/*
 	 * Starting from Alpine Ridge we can use ICM on Apple machines
 	 * as well. We just need to reset and re-enable it first.
+	 * However, only start it if explicitly asked by the user.
 	 */
-	if (!x86_apple_machine)
+	if (icm_firmware_running(tb->nhi))
 		return true;
+	if (!start_icm)
+		return false;
 
 	/*
 	 * Find the upstream PCIe port in case we need to do reset
@@ -1585,6 +1633,18 @@
 		icm_veto_end(tb);
 }
 
+static bool icm_tgl_is_supported(struct tb *tb)
+{
+	u32 val;
+
+	/*
+	 * If the firmware is not running, use the software CM. This platform
+	 * should fully support both.
+	 */
+	val = ioread32(tb->nhi->iobase + REG_FW_STS);
+	return !!(val & REG_FW_STS_NVM_AUTH_DONE);
+}
+
 static void icm_handle_notification(struct work_struct *work)
 {
 	struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -1704,8 +1764,7 @@
 	u32 val;
 
 	/* Check if the ICM firmware is already running */
-	val = ioread32(nhi->iobase + REG_FW_STS);
-	if (val & REG_FW_STS_ICM_EN)
+	if (icm_firmware_running(nhi))
 		return 0;
 
 	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
@@ -1893,14 +1952,12 @@
  */
 static void icm_unplug_children(struct tb_switch *sw)
 {
-	unsigned int i;
+	struct tb_port *port;
 
 	if (tb_route(sw))
 		sw->is_unplugged = true;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
-
+	tb_switch_for_each_port(sw, port) {
 		if (port->xdomain)
 			port->xdomain->is_unplugged = true;
 		else if (tb_port_has_remote(port))
@@ -1940,11 +1997,9 @@
 
 static void icm_free_unplugged_children(struct tb_switch *sw)
 {
-	unsigned int i;
+	struct tb_port *port;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
-
+	tb_switch_for_each_port(sw, port) {
 		if (port->xdomain && port->xdomain->is_unplugged) {
 			tb_xdomain_remove(port->xdomain);
 			port->xdomain = NULL;
@@ -2220,7 +2275,22 @@
 
 	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
 	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
-		icm->is_supported = icm_ar_is_supported;
+		icm->is_supported = icm_fr_is_supported;
+		icm->driver_ready = icm_icl_driver_ready;
+		icm->set_uuid = icm_icl_set_uuid;
+		icm->device_connected = icm_icl_device_connected;
+		icm->device_disconnected = icm_tr_device_disconnected;
+		icm->xdomain_connected = icm_tr_xdomain_connected;
+		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+		icm->rtd3_veto = icm_icl_rtd3_veto;
+		tb->cm_ops = &icm_icl_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+	case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+	case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+	case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+		icm->is_supported = icm_tgl_is_supported;
 		icm->driver_ready = icm_icl_driver_ready;
 		icm->set_uuid = icm_icl_set_uuid;
 		icm->device_connected = icm_icl_device_connected;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index ae1e926..41e6c73 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -45,7 +45,7 @@
 	return sw->cap_lc + start + phys * size;
 }
 
-static int tb_lc_configure_lane(struct tb_port *port, bool configure)
+static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
 {
 	bool upstream = tb_is_upstream_port(port);
 	struct tb_switch *sw = port->sw;
@@ -69,7 +69,7 @@
 	else
 		lane = TB_LC_SX_CTRL_L2C;
 
-	if (configure) {
+	if (configured) {
 		ctrl |= lane;
 		if (upstream)
 			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
@@ -83,55 +83,146 @@
 }
 
 /**
- * tb_lc_configure_link() - Let LC know about configured link
- * @sw: Switch that is being added
+ * tb_lc_configure_port() - Let LC know about configured port
+ * @port: Port that is set as configured
  *
- * Informs LC of both parent switch and @sw that there is established
- * link between the two.
+ * Sets the port configured for power management purposes.
  */
-int tb_lc_configure_link(struct tb_switch *sw)
+int tb_lc_configure_port(struct tb_port *port)
 {
-	struct tb_port *up, *down;
-	int ret;
-
-	if (!sw->config.enabled || !tb_route(sw))
-		return 0;
-
-	up = tb_upstream_port(sw);
-	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
-
-	/* Configure parent link toward this switch */
-	ret = tb_lc_configure_lane(down, true);
-	if (ret)
-		return ret;
-
-	/* Configure upstream link from this switch to the parent */
-	ret = tb_lc_configure_lane(up, true);
-	if (ret)
-		tb_lc_configure_lane(down, false);
-
-	return ret;
+	return tb_lc_set_port_configured(port, true);
 }
 
 /**
- * tb_lc_unconfigure_link() - Let LC know about unconfigured link
- * @sw: Switch to unconfigure
+ * tb_lc_unconfigure_port() - Let LC know about unconfigured port
+ * @port: Port that is set as unconfigured
  *
- * Informs LC of both parent switch and @sw that the link between the
- * two does not exist anymore.
+ * Sets the port unconfigured for power management purposes.
  */
-void tb_lc_unconfigure_link(struct tb_switch *sw)
+void tb_lc_unconfigure_port(struct tb_port *port)
 {
-	struct tb_port *up, *down;
+	tb_lc_set_port_configured(port, false);
+}
 
-	if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
-		return;
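+/* Set or clear the XDomain configuration bit of the lane used by @port */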
+static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
+{
+	struct tb_switch *sw = port->sw;
+	u32 ctrl, lane;
+	int cap, ret;
 
-	up = tb_upstream_port(sw);
-	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+	if (sw->generation < 2)
+		return 0;
 
-	tb_lc_configure_lane(up, false);
-	tb_lc_configure_lane(down, false);
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return cap;
+
+	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+	if (ret)
+		return ret;
+
+	/* Resolve correct lane */
+	if (port->port % 2)
+		lane = TB_LC_SX_CTRL_L1D;
+	else
+		lane = TB_LC_SX_CTRL_L2D;
+
+	if (configure)
+		ctrl |= lane;
+	else
+		ctrl &= ~lane;
+
+	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
+ * @port: Switch downstream port connected to another host
+ *
+ * Sets the lane configured for XDomain accordingly so that the LC knows
+ * about this. Returns %0 on success and negative errno on failure.
+ */
+int tb_lc_configure_xdomain(struct tb_port *port)
+{
+	return tb_lc_set_xdomain_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
+ * @port: Switch downstream port that was connected to another host
+ *
+ * Unsets the lane XDomain configuration.
+ */
+void tb_lc_unconfigure_xdomain(struct tb_port *port)
+{
+	tb_lc_set_xdomain_configured(port, false);
+}
+
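+/* Program the wake bits of a single link controller at @offset */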
+static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
+			      unsigned int flags)
+{
+	u32 ctrl;
+	int ret;
+
+	/*
+	 * Enable the requested wakes: connect/disconnect, PCIe and
+	 * USB4 (wake coming from another router).
+	 */
+	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+			 offset + TB_LC_SX_CTRL, 1);
+	if (ret)
+		return ret;
+
+	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
+		  TB_LC_SX_CTRL_WOU4);
+
+	if (flags & TB_WAKE_ON_CONNECT)
+		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
+	if (flags & TB_WAKE_ON_USB4)
+		ctrl |= TB_LC_SX_CTRL_WOU4;
+	if (flags & TB_WAKE_ON_PCIE)
+		ctrl |= TB_LC_SX_CTRL_WOP;
+
+	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_set_wake() - Enable/disable wake
+ * @sw: Switch whose wakes to configure
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * For each LC, sets the wake bits accordingly.
+ */
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	int start, size, nlc, ret, i;
+	u32 desc;
+
+	if (sw->generation < 2)
+		return 0;
+
+	if (!tb_route(sw))
+		return 0;
+
+	ret = read_lc_desc(sw, &desc);
+	if (ret)
+		return ret;
+
+	/* Figure out number of link controllers */
+	nlc = desc & TB_LC_DESC_NLC_MASK;
+	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+	/* For each link controller set the wake bits */
+	for (i = 0; i < nlc; i++) {
+		unsigned int offset = sw->cap_lc + start + i * size;
+
+		ret = tb_lc_set_wake_one(sw, offset, flags);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
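A hedged usage sketch (hypothetical caller; the TB_WAKE_* flags are the
same ones tested above):

	/* Arm wakes on hotplug and on USB4 wake packets before sleep */
	ret = tb_lc_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
	if (ret)
		tb_sw_warn(sw, "failed to enable LC wakes: %d\n", ret);

Passing flags == 0 clears all wake bits again, e.g. on resume.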
 
 /**
@@ -177,3 +268,206 @@
 
 	return 0;
 }
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether conditions for lane bonding from parent to @sw are
+ * possible.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+	struct tb_port *up;
+	int cap, ret;
+	u32 val;
+
+	if (sw->generation < 2)
+		return false;
+
+	up = tb_upstream_port(sw);
+	cap = find_port_lc_cap(up);
+	if (cap < 0)
+		return false;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+	if (ret)
+		return false;
+
+	return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+				   struct tb_port *in)
+{
+	struct tb_port *port;
+
+	/* The first DP IN port is sink 0 and second is sink 1 */
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_is_dpin(port))
+			return in != port;
+	}
+
+	return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+	u32 val, alloc;
+	int ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+	if (ret)
+		return ret;
+
+	/*
+	 * Sink is available for CM/SW to use if the allocation value is
+	 * either 0 or 1.
+	 */
+	if (!sink) {
+		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+			return 0;
+	} else {
+		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether DP sink is available
+ * for the given DP IN port or not.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+	int sink;
+
+	/*
+	 * For older generations sink is always available as there is no
+	 * allocation mechanism.
+	 */
+	if (sw->generation < 3)
+		return true;
+
+	sink = tb_lc_dp_sink_from_port(sw, in);
+	if (sink < 0)
+		return false;
+
+	return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and allocation is successful returns %0. In all
+ * other cases returns negative errno. In particular %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+	int ret, sink;
+	u32 val;
+
+	if (sw->generation < 3)
+		return 0;
+
+	sink = tb_lc_dp_sink_from_port(sw, in);
+	if (sink < 0)
+		return sink;
+
+	ret = tb_lc_dp_sink_available(sw, sink);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+	if (ret)
+		return ret;
+
+	if (!sink) {
+		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+	} else {
+		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+	}
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+
+	if (ret)
+		return ret;
+
+	tb_port_dbg(in, "sink %d allocated\n", sink);
+	return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+	int ret, sink;
+	u32 val;
+
+	if (sw->generation < 3)
+		return 0;
+
+	sink = tb_lc_dp_sink_from_port(sw, in);
+	if (sink < 0)
+		return sink;
+
+	/* Needs to be owned by CM/SW */
+	ret = tb_lc_dp_sink_available(sw, sink);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+	if (ret)
+		return ret;
+
+	if (!sink)
+		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+	else
+		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+	if (ret)
+		return ret;
+
+	tb_port_dbg(in, "sink %d de-allocated\n", sink);
+	return 0;
+}
+
+/**
+ * tb_lc_force_power() - Forces LC to be powered on
+ * @sw: Thunderbolt switch
+ *
+ * This is useful to let the authentication cycle pass even without
+ * a Thunderbolt link present.
+ */
+int tb_lc_force_power(struct tb_switch *sw)
+{
+	u32 in = 0xffff;
+
+	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
+}
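A hedged usage sketch (hypothetical caller; the quirk flag is the one
introduced by quirks.c later in this patch):

	/* Keep the LC powered so authentication can finish after unplug */
	if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
		ret = tb_lc_force_power(sw);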
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 73a698e..db80dc5 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -24,12 +25,7 @@
 
 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 
-/*
- * Used to enable end-to-end workaround for missing RX packets. Do not
- * use this ring for anything else.
- */
-#define RING_E2E_UNUSED_HOPID	2
-#define RING_FIRST_USABLE_HOPID	TB_PATH_MIN_HOPID
+#define RING_FIRST_USABLE_HOPID	1
 
 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
@@ -451,7 +447,7 @@
 
 		/*
 		 * Automatically allocate HopID from the non-reserved
-		 * range 8 .. hop_count - 1.
+		 * range 1 .. hop_count - 1.
 		 */
 		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 			if (ring->is_tx) {
@@ -507,10 +503,6 @@
 	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
 		transmit ? "TX" : "RX", hop, size);
 
-	/* Tx Ring 2 is reserved for E2E workaround */
-	if (transmit && hop == RING_E2E_UNUSED_HOPID)
-		return NULL;
-
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		return NULL;
@@ -625,19 +617,6 @@
 		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
 	}
 
-	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
-		u32 hop;
-
-		/*
-		 * In order not to lose Rx packets we enable end-to-end
-		 * workaround which transfers Rx credits to an unused Tx
-		 * HopID.
-		 */
-		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
-		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
-		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
-	}
-
 	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
 	if (ring->is_tx) {
 		ring_iowrite32desc(ring, ring->size, 12);
@@ -896,6 +875,22 @@
 	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
 }
 
+static int nhi_freeze_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_thaw_noirq(tb);
+}
+
 static bool nhi_wake_supported(struct pci_dev *pdev)
 {
 	u8 val;
@@ -1102,6 +1097,69 @@
 	return true;
 }
 
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * NHI is resumed first before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+	struct pci_dev *upstream, *pdev;
+
+	if (!x86_apple_machine)
+		return;
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		break;
+	default:
+		return;
+	}
+
+	upstream = pci_upstream_bridge(nhi->pdev);
+	while (upstream) {
+		if (!pci_is_pcie(upstream))
+			return;
+		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		upstream = pci_upstream_bridge(upstream);
+	}
+
+	if (!upstream)
+		return;
+
+	/*
+	 * For each hotplug downstream port, add a device link
+	 * back to NHI so that PCIe tunnels can be re-established after
+	 * sleep.
+	 */
+	for_each_pci_bridge(pdev, upstream->subordinate) {
+		const struct device_link *link;
+
+		if (!pci_is_pcie(pdev))
+			continue;
+		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+		    !pdev->is_hotplug_bridge)
+			continue;
+
+		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_PM_RUNTIME);
+		if (link) {
+			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+				dev_name(&pdev->dev));
+		} else {
+			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+				 dev_name(&pdev->dev));
+		}
+	}
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
@@ -1134,9 +1192,7 @@
 	/* cannot fail - table is allocated in pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12 && nhi->hop_count != 32)
-		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
-			 nhi->hop_count);
+	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
 
 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1169,6 +1225,9 @@
 			return res;
 	}
 
+	tb_apple_add_links(nhi);
+	tb_acpi_add_links(nhi);
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1192,6 +1251,8 @@
 	}
 	pci_set_drvdata(pdev, tb);
 
+	device_wakeup_enable(&pdev->dev);
+
 	pm_runtime_allow(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -1221,14 +1282,13 @@
 static const struct dev_pm_ops nhi_pm_ops = {
 	.suspend_noirq = nhi_suspend_noirq,
 	.resume_noirq = nhi_resume_noirq,
-	.freeze_noirq = nhi_suspend_noirq, /*
+	.freeze_noirq = nhi_freeze_noirq,  /*
 					    * we just disable hotplug, the
 					    * pci-tunnels stay alive.
 					    */
-	.thaw_noirq = nhi_resume_noirq,
+	.thaw_noirq = nhi_thaw_noirq,
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
-	.freeze = nhi_suspend,
 	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,
@@ -1281,6 +1341,17 @@
 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+
+	/* Any USB4 compliant host */
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
 
 	{ 0,}
 };
@@ -1293,6 +1364,7 @@
 	.id_table = nhi_ids,
 	.probe = nhi_probe,
 	.remove = nhi_remove,
+	.shutdown = nhi_remove,
 	.driver.pm = &nhi_pm_ops,
 };
 
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b7b9739..4e0861d 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -73,5 +73,11 @@
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
 #define PCI_DEVICE_ID_INTEL_ICL_NHI1			0x8a0d
 #define PCI_DEVICE_ID_INTEL_ICL_NHI0			0x8a17
+#define PCI_DEVICE_ID_INTEL_TGL_NHI0			0x9a1b
+#define PCI_DEVICE_ID_INTEL_TGL_NHI1			0x9a1d
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0			0x9a1f
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1			0x9a21
+
+#define PCI_CLASS_SERIAL_USB_USB4			0x0c0340
 
 #endif
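The new class value follows the standard PCI class encoding, which is what
lets the catch-all nhi_ids entry match any vendor's USB4 host controller
(a reading of the constant, not new code):

	/*
	 * 0x0c0340: base class 0x0c (serial bus controller),
	 * subclass 0x03 (USB controller), prog-if 0x40 (USB4)
	 */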
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
index 6795851..96da07e 100644
--- a/drivers/thunderbolt/nhi_ops.c
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -59,7 +59,7 @@
 	pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
 
 	if (power) {
-		unsigned int retries = 10;
+		unsigned int retries = 350;
 		u32 val;
 
 		/* Wait until the firmware tells it is up and running */
@@ -67,7 +67,7 @@
 			pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
 			if (val & VS_CAP_9_FW_READY)
 				return 0;
-			msleep(250);
+			usleep_range(3000, 3100);
 		} while (--retries);
 
 		return -ETIMEDOUT;
@@ -97,7 +97,7 @@
 		pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
 		if (data & VS_CAP_18_DONE)
 			goto clear;
-		msleep(100);
+		usleep_range(1000, 1100);
 	} while (time_before(jiffies, end));
 
 	return -ETIMEDOUT;
@@ -121,31 +121,38 @@
 
 static int icl_nhi_suspend(struct tb_nhi *nhi)
 {
+	struct tb *tb = pci_get_drvdata(nhi->pdev);
 	int ret;
 
 	if (icl_nhi_is_device_connected(nhi))
 		return 0;
 
-	/*
-	 * If there is no device connected we need to perform both: a
-	 * handshake through LC mailbox and force power down before
-	 * entering D3.
-	 */
-	icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
-	ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
-	if (ret)
-		return ret;
+	if (tb_switch_is_icm(tb->root_switch)) {
+		/*
+		 * If there is no device connected we need to perform
+		 * both: a handshake through LC mailbox and force power
+		 * down before entering D3.
+		 */
+		icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+		ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+		if (ret)
+			return ret;
+	}
 
 	return icl_nhi_force_power(nhi, false);
 }
 
 static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
 {
+	struct tb *tb = pci_get_drvdata(nhi->pdev);
 	enum icl_lc_mailbox_cmd cmd;
 
 	if (!pm_suspend_via_firmware())
 		return icl_nhi_suspend(nhi);
 
+	if (!tb_switch_is_icm(tb->root_switch))
+		return 0;
+
 	cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
 	icl_nhi_lc_mailbox_cmd(nhi, cmd);
 	return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
new file mode 100644
index 0000000..29de6d9
--- /dev/null
+++ b/drivers/thunderbolt/nvm.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM helpers
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(nvm_ida);
+
+/**
+ * tb_nvm_alloc() - Allocate new NVM structure
+ * @dev: Device owning the NVM
+ *
+ * Allocates new NVM structure with unique @id and returns it. In case
+ * of error returns ERR_PTR().
+ */
+struct tb_nvm *tb_nvm_alloc(struct device *dev)
+{
+	struct tb_nvm *nvm;
+	int ret;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return ERR_PTR(-ENOMEM);
+
+	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		kfree(nvm);
+		return ERR_PTR(ret);
+	}
+
+	nvm->id = ret;
+	nvm->dev = dev;
+
+	return nvm;
+}
+
+/**
+ * tb_nvm_add_active() - Adds active NVMem device to NVM
+ * @nvm: NVM structure
+ * @size: Size of the active NVM in bytes
+ * @reg_read: Pointer to the function to read the NVM (passed directly to the
+ *	      NVMem device)
+ *
+ * Registers new active NVMem device for @nvm. The @reg_read is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_read is the @nvm structure.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
+{
+	struct nvmem_config config;
+	struct nvmem_device *nvmem;
+
+	memset(&config, 0, sizeof(config));
+
+	config.name = "nvm_active";
+	config.reg_read = reg_read;
+	config.read_only = true;
+	config.id = nvm->id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = nvm->dev;
+	config.owner = THIS_MODULE;
+	config.priv = nvm;
+
+	nvmem = nvmem_register(&config);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	nvm->active = nvmem;
+	return 0;
+}
+
+/**
+ * tb_nvm_write_buf() - Write data to @nvm buffer
+ * @nvm: NVM structure
+ * @offset: Offset where to write the data
+ * @val: Data buffer to write
+ * @bytes: Number of bytes to write
+ *
+ * Helper function to cache the new NVM image before it is actually
+ * written to the flash. Copies @bytes from @val to @nvm->buf starting
+ * from @offset.
+ */
+int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
+		     size_t bytes)
+{
+	if (!nvm->buf) {
+		nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!nvm->buf)
+			return -ENOMEM;
+	}
+
+	nvm->flushed = false;
+	nvm->buf_data_size = offset + bytes;
+	memcpy(nvm->buf + offset, val, bytes);
+	return 0;
+}
+
+/**
+ * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
+ * @nvm: NVM structure
+ * @size: Size of the non-active NVM in bytes
+ * @reg_write: Pointer to the function to write the NVM (passed directly
+ *	       to the NVMem device)
+ *
+ * Registers new non-active NVMem device for @nvm. The @reg_write is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_write is the @nvm structure.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
+			  nvmem_reg_write_t reg_write)
+{
+	struct nvmem_config config;
+	struct nvmem_device *nvmem;
+
+	memset(&config, 0, sizeof(config));
+
+	config.name = "nvm_non_active";
+	config.reg_write = reg_write;
+	config.root_only = true;
+	config.id = nvm->id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = nvm->dev;
+	config.owner = THIS_MODULE;
+	config.priv = nvm;
+
+	nvmem = nvmem_register(&config);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	nvm->non_active = nvmem;
+	return 0;
+}
+
+/**
+ * tb_nvm_free() - Release NVM and its resources
+ * @nvm: NVM structure to release
+ *
+ * Releases NVM and the NVMem devices if they were registered.
+ */
+void tb_nvm_free(struct tb_nvm *nvm)
+{
+	if (nvm) {
+		if (nvm->non_active)
+			nvmem_unregister(nvm->non_active);
+		if (nvm->active)
+			nvmem_unregister(nvm->active);
+		vfree(nvm->buf);
+		ida_simple_remove(&nvm_ida, nvm->id);
+	}
+	kfree(nvm);
+}
+
+void tb_nvm_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
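The helpers are meant to be used together; a minimal, hypothetical consumer
(my_reg_read/my_reg_write are placeholder callbacks) mirrors what switch.c
and retimer.c do later in this patch:

	struct tb_nvm *nvm = tb_nvm_alloc(dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = tb_nvm_add_active(nvm, size, my_reg_read);
	if (!ret)
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, my_reg_write);
	if (ret)
		tb_nvm_free(nvm);	/* unregisters NVMem, releases the id */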
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index afe5f83..03e7b71 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -220,7 +220,8 @@
  * Creates path between two ports starting with given @src_hopid. Reserves
  * HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port already has reserved). If there are dual
- * links on the path, prioritizes using @link_nr.
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
  *
  * Return: Returns a tb_path on success or NULL on failure.
  */
@@ -228,7 +229,7 @@
 			      struct tb_port *dst, int dst_hopid, int link_nr,
 			      const char *name)
 {
-	struct tb_port *in_port, *out_port;
+	struct tb_port *in_port, *out_port, *first_port, *last_port;
 	int in_hopid, out_hopid;
 	struct tb_path *path;
 	size_t num_hops;
@@ -238,12 +239,23 @@
 	if (!path)
 		return NULL;
 
-	/*
-	 * Number of hops on a path is the distance between the two
-	 * switches plus the source adapter port.
-	 */
-	num_hops = abs(tb_route_length(tb_route(src->sw)) -
-		       tb_route_length(tb_route(dst->sw))) + 1;
+	first_port = last_port = NULL;
+	i = 0;
+	tb_for_each_port_on_path(src, dst, in_port) {
+		if (!first_port)
+			first_port = in_port;
+		last_port = in_port;
+		i++;
+	}
+
+	/* Check that src and dst are reachable */
+	if (first_port != src || last_port != dst) {
+		kfree(path);
+		return NULL;
+	}
+
+	/* Each hop takes two ports */
+	num_hops = i / 2;
 
 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
 	if (!path->hops) {
@@ -259,7 +271,9 @@
 		if (!in_port)
 			goto err;
 
-		if (in_port->dual_link_port && in_port->link_nr != link_nr)
+		/* When lanes are bonded primary link must be used */
+		if (!in_port->bonded && in_port->dual_link_port &&
+		    in_port->link_nr != link_nr)
 			in_port = in_port->dual_link_port;
 
 		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
@@ -271,8 +285,27 @@
 		if (!out_port)
 			goto err;
 
-		if (out_port->dual_link_port && out_port->link_nr != link_nr)
-			out_port = out_port->dual_link_port;
+		/*
+		 * Pick up right port when going from non-bonded to
+		 * bonded or from bonded to non-bonded.
+		 */
+		if (out_port->dual_link_port) {
+			if (!in_port->bonded && out_port->bonded &&
+			    out_port->link_nr) {
+				/*
+				 * Use primary link when going from
+				 * non-bonded to bonded.
+				 */
+				out_port = out_port->dual_link_port;
+			} else if (!out_port->bonded &&
+				   out_port->link_nr != link_nr) {
+				/*
+				 * If out port is not bonded follow
+				 * link_nr.
+				 */
+				out_port = out_port->dual_link_port;
+			}
+		}
 
 		if (i == num_hops - 1)
 			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
@@ -535,3 +568,24 @@
 	}
 	return false;
 }
+
+/**
+ * tb_path_port_on_path() - Does the path go through certain port
+ * @path: Path to check
+ * @port: Switch to check
+ *
+ * Goes over all hops on the path and checks if @port is used by any of them.
+ * Direction does not matter.
+ */
+bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
+{
+	int i;
+
+	for (i = 0; i < path->path_length; i++) {
+		if (path->hops[i].in_port == port ||
+		    path->hops[i].out_port == port)
+			return true;
+	}
+
+	return false;
+}
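A hedged usage sketch (hypothetical caller; tb_path_deactivate() is the
existing teardown helper declared in tb.h):

	/* Tear down any established path crossing the unplugged port */
	if (tb_path_port_on_path(path, port))
		tb_path_deactivate(path);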
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
new file mode 100644
index 0000000..57e2978
--- /dev/null
+++ b/drivers/thunderbolt/quirks.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - quirks
+ *
+ * Copyright (c) 2020 Mario Limonciello <mario.limonciello@dell.com>
+ */
+
+#include "tb.h"
+
+static void quirk_force_power_link(struct tb_switch *sw)
+{
+	sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
+}
+
+struct tb_quirk {
+	u16 vendor;
+	u16 device;
+	void (*hook)(struct tb_switch *sw);
+};
+
+static const struct tb_quirk tb_quirks[] = {
+	/* Dell WD19TB supports self-authentication on unplug */
+	{ 0x00d4, 0xb070, quirk_force_power_link },
+};
+
+/**
+ * tb_check_quirks() - Check for quirks to apply
+ * @sw: Thunderbolt switch
+ *
+ * Apply any quirks for the Thunderbolt controller.
+ */
+void tb_check_quirks(struct tb_switch *sw)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
+		const struct tb_quirk *q = &tb_quirks[i];
+
+		if (sw->device == q->device && sw->vendor == q->vendor)
+			q->hook(sw);
+	}
+}
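Extending the table is one hook plus one entry; a hypothetical example
(vendor/device IDs invented for illustration):

	static void quirk_example_dock(struct tb_switch *sw)
	{
		sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
	}

	/* hypothetical dock with the same self-authentication behavior */
	{ 0x1234, 0x5678, quirk_example_dock },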
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
new file mode 100644
index 0000000..c44fad2
--- /dev/null
+++ b/drivers/thunderbolt/retimer.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt/USB4 retimer support.
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
+ *	    Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
+
+#include "sb_regs.h"
+#include "tb.h"
+
+#define TB_MAX_RETIMER_INDEX	6
+
+static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_nvm *nvm = priv;
+	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+	int ret;
+
+	pm_runtime_get_sync(&rt->dev);
+
+	if (!mutex_trylock(&rt->tb->lock)) {
+		ret = restart_syscall();
+		goto out;
+	}
+
+	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
+	mutex_unlock(&rt->tb->lock);
+
+out:
+	pm_runtime_mark_last_busy(&rt->dev);
+	pm_runtime_put_autosuspend(&rt->dev);
+
+	return ret;
+}
+
+static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
+				size_t bytes)
+{
+	struct tb_nvm *nvm = priv;
+	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+	int ret = 0;
+
+	if (!mutex_trylock(&rt->tb->lock))
+		return restart_syscall();
+
+	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
+	mutex_unlock(&rt->tb->lock);
+
+	return ret;
+}
+
+static int tb_retimer_nvm_add(struct tb_retimer *rt)
+{
+	struct tb_nvm *nvm;
+	u32 val, nvm_size;
+	int ret;
+
+	nvm = tb_nvm_alloc(&rt->dev);
+	if (IS_ERR(nvm))
+		return PTR_ERR(nvm);
+
+	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
+					 sizeof(val));
+	if (ret)
+		goto err_nvm;
+
+	nvm->major = val >> 16;
+	nvm->minor = val >> 8;
+
+	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
+					 &val, sizeof(val));
+	if (ret)
+		goto err_nvm;
+
+	nvm_size = (SZ_1M << (val & 7)) / 8;
+	nvm_size = (nvm_size - SZ_16K) / 2;
+
+	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
+	if (ret)
+		goto err_nvm;
+
+	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
+	if (ret)
+		goto err_nvm;
+
+	rt->nvm = nvm;
+	return 0;
+
+err_nvm:
+	tb_nvm_free(nvm);
+	return ret;
+}
+
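For orientation, the size arithmetic above matches the switch NVM code:
NVM_FLASH_SIZE encodes the flash size as a power-of-two multiple of 1 Mbit,
and the usable area splits into two image banks after a 16 KiB header.
A worked example with illustrative values (not from the patch):

	/* (val & 7) == 1: a 2 Mbit part */
	nvm_size = (SZ_1M << 1) / 8;		/* 262144 bytes (256 KiB) */
	nvm_size = (nvm_size - SZ_16K) / 2;	/* 122880 bytes per bank */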
+static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = rt->nvm->buf;
+	u16 ds_size, device;
+
+	image_size = rt->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	/*
+	 * Make sure the device ID in the image matches the retimer
+	 * hardware.
+	 */
+	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
+	if (device != rt->device)
+		return -EINVAL;
+
+	/* Skip headers in the image */
+	buf += hdr_size;
+	image_size -= hdr_size;
+
+	return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
+					   image_size);
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+
+	return sprintf(buf, "%#x\n", rt->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+	int ret;
+
+	if (!mutex_trylock(&rt->tb->lock))
+		return restart_syscall();
+
+	if (!rt->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%#x\n", rt->auth_status);
+
+	mutex_unlock(&rt->tb->lock);
+
+	return ret;
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+	bool val;
+	int ret;
+
+	pm_runtime_get_sync(&rt->dev);
+
+	if (!mutex_trylock(&rt->tb->lock)) {
+		ret = restart_syscall();
+		goto exit_rpm;
+	}
+
+	if (!rt->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear status */
+	rt->auth_status = 0;
+
+	if (val) {
+		if (!rt->nvm->buf) {
+			ret = -EINVAL;
+			goto exit_unlock;
+		}
+
+		ret = tb_retimer_nvm_validate_and_write(rt);
+		if (ret)
+			goto exit_unlock;
+
+		ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
+	}
+
+exit_unlock:
+	mutex_unlock(&rt->tb->lock);
+exit_rpm:
+	pm_runtime_mark_last_busy(&rt->dev);
+	pm_runtime_put_autosuspend(&rt->dev);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+	int ret;
+
+	if (!mutex_trylock(&rt->tb->lock))
+		return restart_syscall();
+
+	if (!rt->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
+
+	mutex_unlock(&rt->tb->lock);
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+
+	return sprintf(buf, "%#x\n", rt->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static struct attribute *retimer_attrs[] = {
+	&dev_attr_device.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
+	&dev_attr_vendor.attr,
+	NULL
+};
+
+static const struct attribute_group retimer_group = {
+	.attrs = retimer_attrs,
+};
+
+static const struct attribute_group *retimer_groups[] = {
+	&retimer_group,
+	NULL
+};
+
+static void tb_retimer_release(struct device *dev)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+
+	kfree(rt);
+}
+
+struct device_type tb_retimer_type = {
+	.name = "thunderbolt_retimer",
+	.groups = retimer_groups,
+	.release = tb_retimer_release,
+};
+
+static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
+{
+	struct tb_retimer *rt;
+	u32 vendor, device;
+	int ret;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
+				     sizeof(vendor));
+	if (ret) {
+		if (ret != -ENODEV)
+			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
+		return ret;
+	}
+
+	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
+				     sizeof(device));
+	if (ret) {
+		if (ret != -ENODEV)
+			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
+		return ret;
+	}
+
+	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
+		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
+			     vendor);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Check that it supports NVM operations. If not then don't add
+	 * the device at all.
+	 */
+	ret = usb4_port_retimer_nvm_sector_size(port, index);
+	if (ret < 0)
+		return ret;
+
+	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+	if (!rt)
+		return -ENOMEM;
+
+	rt->index = index;
+	rt->vendor = vendor;
+	rt->device = device;
+	rt->auth_status = auth_status;
+	rt->port = port;
+	rt->tb = port->sw->tb;
+
+	rt->dev.parent = &port->sw->dev;
+	rt->dev.bus = &tb_bus_type;
+	rt->dev.type = &tb_retimer_type;
+	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
+		     port->port, index);
+
+	ret = device_register(&rt->dev);
+	if (ret) {
+		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
+		put_device(&rt->dev);
+		return ret;
+	}
+
+	ret = tb_retimer_nvm_add(rt);
+	if (ret) {
+		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
+		device_unregister(&rt->dev);
+		return ret;
+	}
+
+	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
+		 rt->vendor, rt->device);
+
+	pm_runtime_no_callbacks(&rt->dev);
+	pm_runtime_set_active(&rt->dev);
+	pm_runtime_enable(&rt->dev);
+	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&rt->dev);
+	pm_runtime_use_autosuspend(&rt->dev);
+
+	return 0;
+}
+
+static void tb_retimer_remove(struct tb_retimer *rt)
+{
+	dev_info(&rt->dev, "retimer disconnected\n");
+	tb_nvm_free(rt->nvm);
+	device_unregister(&rt->dev);
+}
+
+struct tb_retimer_lookup {
+	const struct tb_port *port;
+	u8 index;
+};
+
+static int retimer_match(struct device *dev, void *data)
+{
+	const struct tb_retimer_lookup *lookup = data;
+	struct tb_retimer *rt = tb_to_retimer(dev);
+
+	return rt && rt->port == lookup->port && rt->index == lookup->index;
+}
+
+static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
+{
+	struct tb_retimer_lookup lookup = { .port = port, .index = index };
+	struct device *dev;
+
+	dev = device_find_child(&port->sw->dev, &lookup, retimer_match);
+	if (dev)
+		return tb_to_retimer(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_retimer_scan() - Scan for on-board retimers under port
+ * @port: USB4 port to scan
+ *
+ * Tries to enumerate on-board retimers connected to @port. Found
+ * retimers are registered as children of the switch that owns @port.
+ * Does not scan for cable retimers for now.
+ */
+int tb_retimer_scan(struct tb_port *port)
+{
+	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
+	int ret, i, last_idx = 0;
+
+	if (!port->cap_usb4)
+		return 0;
+
+	/*
+	 * Send broadcast RT to make sure retimer indices facing this
+	 * port are set.
+	 */
+	ret = usb4_port_enumerate_retimers(port);
+	if (ret)
+		return ret;
+
+	/*
+	 * Before doing anything else, read the authentication status.
+	 * If the retimer has it set, store it for the new retimer
+	 * device instance.
+	 */
+	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
+
+	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+		/*
+		 * Last retimer is true only for the last on-board
+		 * retimer (the one connected directly to the Type-C
+		 * port).
+		 */
+		ret = usb4_port_retimer_is_last(port, i);
+		if (ret > 0)
+			last_idx = i;
+		else if (ret < 0)
+			break;
+	}
+
+	if (!last_idx)
+		return 0;
+
+	/* Add on-board retimers if they do not exist already */
+	for (i = 1; i <= last_idx; i++) {
+		struct tb_retimer *rt;
+
+		rt = tb_port_find_retimer(port, i);
+		if (rt) {
+			put_device(&rt->dev);
+		} else {
+			ret = tb_retimer_add(port, i, status[i]);
+			if (ret && ret != -EOPNOTSUPP)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int remove_retimer(struct device *dev, void *data)
+{
+	struct tb_retimer *rt = tb_to_retimer(dev);
+	struct tb_port *port = data;
+
+	if (rt && rt->port == port)
+		tb_retimer_remove(rt);
+	return 0;
+}
+
+/**
+ * tb_retimer_remove_all() - Remove all retimers under port
+ * @port: USB4 port whose retimers to remove
+ *
+ * This removes all previously added retimers under @port.
+ */
+void tb_retimer_remove_all(struct tb_port *port)
+{
+	if (port->cap_usb4)
+		device_for_each_child_reverse(&port->sw->dev, port,
+					      remove_retimer);
+}
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
new file mode 100644
index 0000000..9dafd69
--- /dev/null
+++ b/drivers/thunderbolt/sb_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USB4 port sideband registers found on routers and retimers
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	    Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#ifndef _SB_REGS
+#define _SB_REGS
+
+#define USB4_SB_VENDOR_ID			0x00
+#define USB4_SB_PRODUCT_ID			0x01
+#define USB4_SB_OPCODE				0x08
+
+enum usb4_sb_opcode {
+	USB4_SB_OPCODE_ERR = 0x20525245,			/* "ERR " */
+	USB4_SB_OPCODE_ONS = 0x444d4321,			/* "!CMD" */
+	USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,		/* "ENUM" */
+	USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,		/* "LAST" */
+	USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
+	USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
+	USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42,		/* "BLKW" */
+	USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541,		/* "AUTH" */
+	USB4_SB_OPCODE_NVM_READ = 0x52524641,			/* "AFRR" */
+};
+
+#define USB4_SB_METADATA			0x09
+#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK	GENMASK(5, 0)
+#define USB4_SB_DATA				0x12
+
+#endif
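The opcode values double as little-endian ASCII tags, which keeps sideband
traces readable; a small decoding sketch (illustrative only):

	u32 op = USB4_SB_OPCODE_ENUMERATE_RETIMERS;	/* 0x4d554e45 */
	char tag[5] = { op & 0xff, (op >> 8) & 0xff,
			(op >> 16) & 0xff, (op >> 24) & 0xff, '\0' };
	/* tag now reads "ENUM" */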
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index e53932d..c4b157c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -13,21 +13,12 @@
 #include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 
 #include "tb.h"
 
 /* Switch NVM support */
 
-#define NVM_DEVID		0x05
-#define NVM_VERSION		0x08
 #define NVM_CSS			0x10
-#define NVM_FLASH_SIZE		0x45
-
-#define NVM_MIN_SIZE		SZ_32K
-#define NVM_MAX_SIZE		SZ_512K
-
-static DEFINE_IDA(nvm_ida);
 
 struct nvm_auth_status {
 	struct list_head list;
@@ -35,6 +26,11 @@
 	u32 status;
 };
 
+enum nvm_write_ops {
+	WRITE_AND_AUTHENTICATE = 1,
+	WRITE_ONLY = 2,
+};
+
 /*
 * Hold NVM authentication failure status per switch. This information
  * needs to stay around even when the switch gets power cycled so we
@@ -163,10 +159,16 @@
 		image_size -= hdr_size;
 	}
 
-	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+	if (tb_switch_is_usb4(sw))
+		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
+	else
+		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+	if (!ret)
+		sw->nvm->flushed = true;
+	return ret;
 }
 
-static int nvm_authenticate_host(struct tb_switch *sw)
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
 {
 	int ret = 0;
 
@@ -206,7 +208,7 @@
 	return ret;
 }
 
-static int nvm_authenticate_device(struct tb_switch *sw)
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
 {
 	int ret, retries = 10;
 
@@ -251,10 +253,83 @@
 	return -ETIMEDOUT;
 }
 
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	/*
+	 * During host router NVM upgrade we should not allow the root port
+	 * to go into D3cold because some root ports cannot trigger PME
+	 * themselves. To be on the safe side keep the root port in D0 during
+	 * the whole upgrade process.
+	 */
+	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+	if (tb_switch_is_usb4(sw)) {
+		/*
+		 * USB4 devices must support NVM operations but it is
+		 * optional for hosts. Therefore we query the NVM sector
+		 * size here and if it is supported assume NVM
+		 * operations are implemented.
+		 */
+		return usb4_switch_nvm_sector_size(sw) > 0;
+	}
+
+	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
+	return !!sw->dma_port;
+}
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+	if (sw->no_nvm_upgrade)
+		return false;
+	return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+			   void *buf, size_t size)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_nvm_read(sw, address, buf, size);
+	return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+	int ret;
+
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_nvm_authenticate(sw);
+
+	if (!tb_route(sw)) {
+		nvm_authenticate_start_dma_port(sw);
+		ret = nvm_authenticate_host_dma_port(sw);
+	} else {
+		ret = nvm_authenticate_device_dma_port(sw);
+	}
+
+	return ret;
+}
+
 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 			      size_t bytes)
 {
-	struct tb_switch *sw = priv;
+	struct tb_nvm *nvm = priv;
+	struct tb_switch *sw = tb_to_switch(nvm->dev);
 	int ret;
 
 	pm_runtime_get_sync(&sw->dev);
@@ -264,7 +339,7 @@
 		goto out;
 	}
 
-	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+	ret = nvm_read(sw, offset, val, bytes);
 	mutex_unlock(&sw->tb->lock);
 
 out:
@@ -274,17 +349,12 @@
 	return ret;
 }
 
-static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
-				 size_t bytes)
-{
-	return -EPERM;
-}
-
 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 			       size_t bytes)
 {
-	struct tb_switch *sw = priv;
-	int ret = 0;
+	struct tb_nvm *nvm = priv;
+	struct tb_switch *sw = tb_to_switch(nvm->dev);
+	int ret;
 
 	if (!mutex_trylock(&sw->tb->lock))
 		return restart_syscall();
@@ -295,67 +365,37 @@
 	 * locally here and handle the special cases when the user asks
 	 * us to authenticate the image.
 	 */
-	if (!sw->nvm->buf) {
-		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
-		if (!sw->nvm->buf) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-	}
-
-	sw->nvm->buf_data_size = offset + bytes;
-	memcpy(sw->nvm->buf + offset, val, bytes);
-
-unlock:
+	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
 	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
 
-static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
-					   size_t size, bool active)
-{
-	struct nvmem_config config;
-
-	memset(&config, 0, sizeof(config));
-
-	if (active) {
-		config.name = "nvm_active";
-		config.reg_read = tb_switch_nvm_read;
-		config.read_only = true;
-	} else {
-		config.name = "nvm_non_active";
-		config.reg_read = tb_switch_nvm_no_read;
-		config.reg_write = tb_switch_nvm_write;
-		config.root_only = true;
-	}
-
-	config.id = id;
-	config.stride = 4;
-	config.word_size = 4;
-	config.size = size;
-	config.dev = &sw->dev;
-	config.owner = THIS_MODULE;
-	config.priv = sw;
-
-	return nvmem_register(&config);
-}
-
 static int tb_switch_nvm_add(struct tb_switch *sw)
 {
-	struct nvmem_device *nvm_dev;
-	struct tb_switch_nvm *nvm;
+	struct tb_nvm *nvm;
 	u32 val;
 	int ret;
 
-	if (!sw->dma_port)
+	if (!nvm_readable(sw))
 		return 0;
 
-	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
-	if (!nvm)
-		return -ENOMEM;
+	/*
+	 * The NVM format of non-Intel hardware is not known, so for now
+	 * restrict NVM upgrades to Intel hardware. We may relax this in
+	 * the future once other NVM formats are known.
+	 */
+	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
+	    sw->config.vendor_id != 0x8087) {
+		dev_info(&sw->dev,
+			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+			 sw->config.vendor_id);
+		return 0;
+	}
 
-	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+	nvm = tb_nvm_alloc(&sw->dev);
+	if (IS_ERR(nvm))
+		return PTR_ERR(nvm);
 
 	/*
 	 * If the switch is in safe-mode the only accessible portion of
@@ -365,56 +405,44 @@
 	if (!sw->safe_mode) {
 		u32 nvm_size, hdr_size;
 
-		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
-					  sizeof(val));
+		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
 		if (ret)
-			goto err_ida;
+			goto err_nvm;
 
 		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 		nvm_size = (SZ_1M << (val & 7)) / 8;
 		nvm_size = (nvm_size - hdr_size) / 2;
 
-		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
-					  sizeof(val));
+		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
 		if (ret)
-			goto err_ida;
+			goto err_nvm;
 
 		nvm->major = val >> 16;
 		nvm->minor = val >> 8;
 
-		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
-		if (IS_ERR(nvm_dev)) {
-			ret = PTR_ERR(nvm_dev);
-			goto err_ida;
-		}
-		nvm->active = nvm_dev;
+		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
+		if (ret)
+			goto err_nvm;
 	}
 
 	if (!sw->no_nvm_upgrade) {
-		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
-		if (IS_ERR(nvm_dev)) {
-			ret = PTR_ERR(nvm_dev);
-			goto err_nvm_active;
-		}
-		nvm->non_active = nvm_dev;
+		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
+					    tb_switch_nvm_write);
+		if (ret)
+			goto err_nvm;
 	}
 
 	sw->nvm = nvm;
 	return 0;
 
-err_nvm_active:
-	if (nvm->active)
-		nvmem_unregister(nvm->active);
-err_ida:
-	ida_simple_remove(&nvm_ida, nvm->id);
-	kfree(nvm);
-
+err_nvm:
+	tb_nvm_free(nvm);
 	return ret;
 }
 
 static void tb_switch_nvm_remove(struct tb_switch *sw)
 {
-	struct tb_switch_nvm *nvm;
+	struct tb_nvm *nvm;
 
 	nvm = sw->nvm;
 	sw->nvm = NULL;
@@ -426,13 +454,7 @@
 	if (!nvm->authenticating)
 		nvm_clear_auth_status(sw);
 
-	if (nvm->non_active)
-		nvmem_unregister(nvm->non_active);
-	if (nvm->active)
-		nvmem_unregister(nvm->active);
-	ida_simple_remove(&nvm_ida, nvm->id);
-	vfree(nvm->buf);
-	kfree(nvm);
+	tb_nvm_free(nvm);
 }
 
 /* port utility functions */
@@ -579,17 +601,24 @@
 	if (credits == 0 || port->sw->is_unplugged)
 		return 0;
 
-	nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+	/*
+	 * USB4 restricts programming NFC buffers to lane adapters only
+	 * so skip other ports.
+	 */
+	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
+		return 0;
+
+	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 	nfc_credits += credits;
 
-	tb_port_dbg(port, "adding %d NFC credits to %lu",
-		    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
 
-	port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
 	port->config.nfc_credits |= nfc_credits;
 
 	return tb_port_write(port, &port->config.nfc_credits,
-			     TB_CFG_PORT, 4, 1);
+			     TB_CFG_PORT, ADP_CS_4, 1);
 }
 
 /**
@@ -604,14 +633,14 @@
 	u32 data;
 	int ret;
 
-	ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
 	if (ret)
 		return ret;
 
-	data &= ~TB_PORT_LCA_MASK;
-	data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+	data &= ~ADP_CS_5_LCA_MASK;
+	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
 
-	return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
 }
 
 /**
@@ -627,6 +656,68 @@
 }
 
 /**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4 but can be called for any CIO/USB4 port. Makes the
+ * downstream router accessible for CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+	if (tb_switch_is_icm(port->sw))
+		return 0;
+	if (!tb_port_is_null(port))
+		return -EINVAL;
+	if (tb_switch_is_usb4(port->sw))
+		return usb4_port_unlock(port);
+	return 0;
+}
+
+static int __tb_port_enable(struct tb_port *port, bool enable)
+{
+	int ret;
+	u32 phy;
+
+	if (!tb_port_is_null(port))
+		return -EINVAL;
+
+	ret = tb_port_read(port, &phy, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	if (enable)
+		phy &= ~LANE_ADP_CS_1_LD;
+	else
+		phy |= LANE_ADP_CS_1_LD;
+
+	return tb_port_write(port, &phy, TB_CFG_PORT,
+			     port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+/**
+ * tb_port_enable() - Enable lane adapter
+ * @port: Port to enable (can be %NULL)
+ *
+ * This is used to enable a lane 0 or lane 1 adapter.
+ */
+int tb_port_enable(struct tb_port *port)
+{
+	return __tb_port_enable(port, true);
+}
+
+/**
+ * tb_port_disable() - Disable lane adapter
+ * @port: Port to disable (can be %NULL)
+ *
+ * This is used to disable a lane 0 or lane 1 adapter.
+ */
+int tb_port_disable(struct tb_port *port)
+{
+	return __tb_port_enable(port, false);
+}
+
+/**
  * tb_init_port() - initialize a port
  *
  * This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -644,6 +735,7 @@
 		if (res == -ENODEV) {
 			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
 			       port->port);
+			port->disabled = true;
 			return 0;
 		}
 		return res;
@@ -657,6 +749,10 @@
 			port->cap_phy = cap;
 		else
 			tb_port_WARN(port, "non switch port without a PHY\n");
+
+		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+		if (cap > 0)
+			port->cap_usb4 = cap;
 	} else if (port->port != 0) {
 		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 		if (cap > 0)
@@ -665,12 +761,7 @@
 
 	tb_dump_port(port->sw->tb, &port->config);
 
-	/* Control port does not need HopID allocation */
-	if (port->port) {
-		ida_init(&port->in_hopids);
-		ida_init(&port->out_hopids);
-	}
-
+	INIT_LIST_HEAD(&port->list);
 	return 0;
 
 }
@@ -689,8 +780,11 @@
 		ida = &port->out_hopids;
 	}
 
-	/* HopIDs 0-7 are reserved */
-	if (min_hopid < TB_PATH_MIN_HOPID)
+	/*
+	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
+	 * reserved.
+	 */
+	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
 		min_hopid = TB_PATH_MIN_HOPID;
 
 	if (max_hopid < 0 || max_hopid > port_max_hopid)
@@ -747,6 +841,13 @@
 	ida_simple_remove(&port->out_hopids, hopid);
 }
 
+static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
+					  const struct tb_switch *sw)
+{
+	u64 mask = (1ULL << parent->config.depth * 8) - 1;
+	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
+}
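Route strings pack one 8-bit downstream port number per depth level, so a
parent at depth 2 compares the low 16 bits. A worked example with
illustrative route values:

	/* parent depth 2, route 0x0305; child route 0x070305 */
	mask = (1ULL << 2 * 8) - 1;	/* 0xffff */
	/* 0x0305 & 0xffff == 0x070305 & 0xffff, so the child is reachable */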
+
 /**
  * tb_next_port_on_path() - Return next port for given port on a path
  * @start: Start port of the walk
@@ -776,12 +877,12 @@
 		return end;
 	}
 
-	if (start->sw->config.depth < end->sw->config.depth) {
+	if (tb_switch_is_reachable(prev->sw, end->sw)) {
+		next = tb_port_at(tb_route(end->sw), prev->sw);
+		/* Walk down the topology if next == prev */
 		if (prev->remote &&
-		    prev->remote->sw->config.depth > prev->sw->config.depth)
+		    (next == prev || next->dual_link_port == prev))
 			next = prev->remote;
-		else
-			next = tb_port_at(tb_route(end->sw), prev->sw);
 	} else {
 		if (tb_is_upstream_port(prev)) {
 			next = prev->remote;
@@ -798,7 +899,139 @@
 		}
 	}
 
-	return next;
+	return next != prev ? next : NULL;
+}
+
+/**
+ * tb_port_get_link_speed() - Get current link speed
+ * @port: Port to check (USB4 or CIO)
+ *
+ * Returns link speed in Gb/s or negative errno in case of failure.
+ */
+int tb_port_get_link_speed(struct tb_port *port)
+{
+	u32 val, speed;
+	int ret;
+
+	if (!port->cap_phy)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
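Combined with the static tb_port_get_link_width() below, total link
bandwidth follows as speed times width: a bonded Gen 3 link, for example,
gives 20 Gb/s x 2 = 40 Gb/s (illustrative arithmetic, not a value read
from hardware).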
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+	u32 val;
+	int ret;
+
+	if (!port->cap_phy)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+	u32 phy, widths;
+	int ret;
+
+	if (!port->cap_phy)
+		return false;
+
+	ret = tb_port_read(port, &phy, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_0, 1);
+	if (ret)
+		return false;
+
+	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+	return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+	u32 val;
+	int ret;
+
+	if (!port->cap_phy)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+	switch (width) {
+	case 1:
+		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+		break;
+	case 2:
+		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val |= LANE_ADP_CS_1_LB;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+	int ret;
+
+	/*
+	 * Enable lane bonding for both links if not already enabled by,
+	 * for example, the boot firmware.
+	 */
+	ret = tb_port_get_link_width(port);
+	if (ret == 1) {
+		ret = tb_port_set_link_width(port, 2);
+		if (ret)
+			return ret;
+	}
+
+	ret = tb_port_get_link_width(port->dual_link_port);
+	if (ret == 1) {
+		ret = tb_port_set_link_width(port->dual_link_port, 2);
+		if (ret) {
+			tb_port_set_link_width(port, 1);
+			return ret;
+		}
+	}
+
+	port->bonded = true;
+	port->dual_link_port->bonded = true;
+
+	return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+	port->dual_link_port->bonded = false;
+	port->bonded = false;
+
+	tb_port_set_link_width(port->dual_link_port, 1);
+	tb_port_set_link_width(port, 1);
 }
 
 /**
@@ -816,12 +1049,47 @@
 	case TB_TYPE_DP_HDMI_OUT:
 		return tb_dp_port_is_enabled(port);
 
+	case TB_TYPE_USB3_UP:
+	case TB_TYPE_USB3_DOWN:
+		return tb_usb3_port_is_enabled(port);
+
 	default:
 		return false;
 	}
 }
 
 /**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+	u32 data;
+
+	if (tb_port_read(port, &data, TB_CFG_PORT,
+			 port->cap_adap + ADP_USB3_CS_0, 1))
+		return false;
+
+	return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+			  : ADP_USB3_CS_0_V;
+
+	if (!port->cap_adap)
+		return -ENXIO;
+	return tb_port_write(port, &word, TB_CFG_PORT,
+			     port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
  * @port: PCIe port to check
  */
@@ -829,10 +1097,11 @@
 {
 	u32 data;
 
-	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+	if (tb_port_read(port, &data, TB_CFG_PORT,
+			 port->cap_adap + ADP_PCIE_CS_0, 1))
 		return false;
 
-	return !!(data & TB_PCI_EN);
+	return !!(data & ADP_PCIE_CS_0_PE);
 }
 
 /**
@@ -842,10 +1111,11 @@
  */
 int tb_pci_port_enable(struct tb_port *port, bool enable)
 {
-	u32 word = enable ? TB_PCI_EN : 0x0;
+	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
 	if (!port->cap_adap)
 		return -ENXIO;
-	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+	return tb_port_write(port, &word, TB_CFG_PORT,
+			     port->cap_adap + ADP_PCIE_CS_0, 1);
 }
 
 /**
@@ -859,11 +1129,12 @@
 	u32 data;
 	int ret;
 
-	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+	ret = tb_port_read(port, &data, TB_CFG_PORT,
+			   port->cap_adap + ADP_DP_CS_2, 1);
 	if (ret)
 		return ret;
 
-	return !!(data & TB_DP_HDP);
+	return !!(data & ADP_DP_CS_2_HDP);
 }
 
 /**
@@ -877,12 +1148,14 @@
 	u32 data;
 	int ret;
 
-	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+	ret = tb_port_read(port, &data, TB_CFG_PORT,
+			   port->cap_adap + ADP_DP_CS_3, 1);
 	if (ret)
 		return ret;
 
-	data |= TB_DP_HPDC;
-	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+	data |= ADP_DP_CS_3_HDPC;
+	return tb_port_write(port, &data, TB_CFG_PORT,
+			     port->cap_adap + ADP_DP_CS_3, 1);
 }
 
 /**
@@ -900,20 +1173,23 @@
 	u32 data[2];
 	int ret;
 
-	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
-			   ARRAY_SIZE(data));
+	ret = tb_port_read(port, data, TB_CFG_PORT,
+			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 	if (ret)
 		return ret;
 
-	data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
-	data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
 
-	data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
-	data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
-	data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+		ADP_DP_CS_0_VIDEO_HOPID_MASK;
+	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
 
-	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
-			     ARRAY_SIZE(data));
+	return tb_port_write(port, data, TB_CFG_PORT,
+			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 }
 
 /**
@@ -924,11 +1200,11 @@
 {
 	u32 data[2];
 
-	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
 			 ARRAY_SIZE(data)))
 		return false;
 
-	return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
 }
 
 /**
@@ -944,57 +1220,76 @@
 	u32 data[2];
 	int ret;
 
-	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
-			   ARRAY_SIZE(data));
+	ret = tb_port_read(port, data, TB_CFG_PORT,
+			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 	if (ret)
 		return ret;
 
 	if (enable)
-		data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
 	else
-		data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
 
-	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
-			     ARRAY_SIZE(data));
+	return tb_port_write(port, data, TB_CFG_PORT,
+			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 }
 
 /* switch utility functions */
 
-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
 {
-	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
-	       sw->vendor_id, sw->device_id, sw->revision,
-	       sw->thunderbolt_version);
-	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
+	switch (sw->generation) {
+	case 1:
+		return "Thunderbolt 1";
+	case 2:
+		return "Thunderbolt 2";
+	case 3:
+		return "Thunderbolt 3";
+	case 4:
+		return "USB4";
+	default:
+		return "Unknown";
+	}
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
+{
+	const struct tb_regs_switch_header *regs = &sw->config;
+
+	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+	       regs->revision, regs->thunderbolt_version);
+	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
 	tb_dbg(tb, "  Config:\n");
 	tb_dbg(tb,
 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
-	       sw->upstream_port_number, sw->depth,
-	       (((u64) sw->route_hi) << 32) | sw->route_lo,
-	       sw->enabled, sw->plug_events_delay);
+	       regs->upstream_port_number, regs->depth,
+	       (((u64) regs->route_hi) << 32) | regs->route_lo,
+	       regs->enabled, regs->plug_events_delay);
 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
-	       sw->__unknown1, sw->__unknown4);
+	       regs->__unknown1, regs->__unknown4);
 }
 
 /**
- * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * @sw: Switch to reset
  *
  * Return: Returns 0 on success or an error code on failure.
  */
-int tb_switch_reset(struct tb *tb, u64 route)
+int tb_switch_reset(struct tb_switch *sw)
 {
 	struct tb_cfg_result res;
-	struct tb_regs_switch_header header = {
-		header.route_hi = route >> 32,
-		header.route_lo = route,
-		header.enabled = true,
-	};
-	tb_dbg(tb, "resetting switch at %llx\n", route);
-	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
-			0, 2, 2, 2);
+
+	if (sw->generation > 1)
+		return 0;
+
+	tb_sw_dbg(sw, "resetting switch\n");
+
+	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+			      TB_CFG_SWITCH, 2, 2);
 	if (res.err)
 		return res.err;
-	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
+	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
 	if (res.err > 0)
 		return -EIO;
 	return res.err;
@@ -1012,7 +1307,7 @@
 	u32 data;
 	int res;
 
-	if (!sw->config.enabled)
+	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
 		return 0;
 
 	sw->config.plug_events_delay = 0xff;
@@ -1140,6 +1435,15 @@
 }
 static DEVICE_ATTR_RO(device_name);
 
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
@@ -1192,30 +1496,36 @@
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
 
-static void nvm_authenticate_start(struct tb_switch *sw)
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
 {
-	struct pci_dev *root_port;
+	struct tb_switch *sw = tb_to_switch(dev);
 
-	/*
-	 * During host router NVM upgrade we should not allow root port to
-	 * go into D3cold because some root ports cannot trigger PME
-	 * itself. To be on the safe side keep the root port in D0 during
-	 * the whole upgrade process.
-	 */
-	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
-	if (root_port)
-		pm_runtime_get_noresume(&root_port->dev);
+	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
 }
 
-static void nvm_authenticate_complete(struct tb_switch *sw)
-{
-	struct pci_dev *root_port;
+/*
+ * Currently all lanes must run at the same speed but we expose both
+ * directions here to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
 
-	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
-	if (root_port)
-		pm_runtime_put(&root_port->dev);
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->link_width);
 }
 
+/*
+ * Currently the link has the same number of lanes in both directions
+ * (1 or 2) but we expose them separately to allow possible asymmetric
+ * links in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
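
The speed and lane attributes land in the router's sysfs directory next
to the existing ones. A minimal userspace sketch for reading one of
them; the device name "0-1" is hypothetical and depends on the actual
topology:

	#include <stdio.h>

	int main(void)
	{
		/* "0-1" is an example device name, not a fixed path */
		FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/tx_speed", "r");
		char buf[32];

		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("TX speed: %s", buf);	/* e.g. "20.0 Gb/s" */
			fclose(f);
		}
		return 0;
	}
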
+
 static ssize_t nvm_authenticate_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -1226,11 +1536,11 @@
 	return sprintf(buf, "%#x\n", status);
 }
 
-static ssize_t nvm_authenticate_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
+				      bool disconnect)
 {
 	struct tb_switch *sw = tb_to_switch(dev);
-	bool val;
+	int val;
 	int ret;
 
 	pm_runtime_get_sync(&sw->dev);
@@ -1246,34 +1556,31 @@
 		goto exit_unlock;
 	}
 
-	ret = kstrtobool(buf, &val);
+	ret = kstrtoint(buf, 10, &val);
 	if (ret)
 		goto exit_unlock;
 
 	/* Always clear the authentication status */
 	nvm_clear_auth_status(sw);
 
-	if (val) {
-		if (!sw->nvm->buf) {
-			ret = -EINVAL;
-			goto exit_unlock;
+	if (val > 0) {
+		if (!sw->nvm->flushed) {
+			if (!sw->nvm->buf) {
+				ret = -EINVAL;
+				goto exit_unlock;
+			}
+
+			ret = nvm_validate_and_write(sw);
+			if (ret || val == WRITE_ONLY)
+				goto exit_unlock;
 		}
-
-		ret = nvm_validate_and_write(sw);
-		if (ret)
-			goto exit_unlock;
-
-		sw->nvm->authenticating = true;
-
-		if (!tb_route(sw)) {
-			/*
-			 * Keep root port from suspending as long as the
-			 * NVM upgrade process is running.
-			 */
-			nvm_authenticate_start(sw);
-			ret = nvm_authenticate_host(sw);
-		} else {
-			ret = nvm_authenticate_device(sw);
+		if (val == WRITE_AND_AUTHENTICATE) {
+			if (disconnect) {
+				ret = tb_lc_force_power(sw);
+			} else {
+				sw->nvm->authenticating = true;
+				ret = nvm_authenticate(sw);
+			}
 		}
 	}
 
@@ -1283,12 +1590,35 @@
 	pm_runtime_mark_last_busy(&sw->dev);
 	pm_runtime_put_autosuspend(&sw->dev);
 
+	return ret;
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = nvm_authenticate_sysfs(dev, buf, false);
 	if (ret)
 		return ret;
 	return count;
 }
 static DEVICE_ATTR_RW(nvm_authenticate);
 
+static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return nvm_authenticate_show(dev, attr, buf);
+}
+
+static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+
+	ret = nvm_authenticate_sysfs(dev, buf, true);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
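
nvm_authenticate_sysfs() parses an integer rather than a bool so that
userspace can request flushing the image separately from authenticating
it. The WRITE_ONLY and WRITE_AND_AUTHENTICATE constants are defined
earlier in switch.c, outside this hunk; presumably along these lines:

	/* Presumed definition (earlier in switch.c, not visible in this hunk) */
	enum nvm_write_ops {
		WRITE_AND_AUTHENTICATE = 1,	/* flush the image, then authenticate */
		WRITE_ONLY = 2,			/* only flush the image to NVM */
	};

For nvm_authenticate_on_disconnect the authenticate step is deferred:
instead of triggering authentication immediately, the store path calls
tb_lc_force_power() and the new image is then authenticated when the
device is disconnected.
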
+
 static ssize_t nvm_version_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -1343,9 +1673,15 @@
 	&dev_attr_boot.attr,
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
+	&dev_attr_generation.attr,
 	&dev_attr_key.attr,
 	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_authenticate_on_disconnect.attr,
 	&dev_attr_nvm_version.attr,
+	&dev_attr_rx_speed.attr,
+	&dev_attr_rx_lanes.attr,
+	&dev_attr_tx_speed.attr,
+	&dev_attr_tx_lanes.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	&dev_attr_unique_id.attr,
@@ -1355,7 +1691,7 @@
 static umode_t switch_attr_is_visible(struct kobject *kobj,
 				      struct attribute *attr, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct tb_switch *sw = tb_to_switch(dev);
 
 	if (attr == &dev_attr_device.attr) {
@@ -1376,18 +1712,29 @@
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_rx_speed.attr ||
+		   attr == &dev_attr_rx_lanes.attr ||
+		   attr == &dev_attr_tx_speed.attr ||
+		   attr == &dev_attr_tx_lanes.attr) {
+		if (tb_route(sw))
+			return attr->mode;
+		return 0;
 	} else if (attr == &dev_attr_nvm_authenticate.attr) {
-		if (sw->dma_port && !sw->no_nvm_upgrade)
+		if (nvm_upgradeable(sw))
 			return attr->mode;
 		return 0;
 	} else if (attr == &dev_attr_nvm_version.attr) {
-		if (sw->dma_port)
+		if (nvm_readable(sw))
 			return attr->mode;
 		return 0;
 	} else if (attr == &dev_attr_boot.attr) {
 		if (tb_route(sw))
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
+		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
+			return attr->mode;
+		return 0;
 	}
 
 	return sw->safe_mode ? 0 : attr->mode;
@@ -1406,15 +1753,13 @@
 static void tb_switch_release(struct device *dev)
 {
 	struct tb_switch *sw = tb_to_switch(dev);
-	int i;
+	struct tb_port *port;
 
 	dma_port_free(sw->dma_port);
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (!sw->ports[i].disabled) {
-			ida_destroy(&sw->ports[i].in_hopids);
-			ida_destroy(&sw->ports[i].out_hopids);
-		}
+	tb_switch_for_each_port(sw, port) {
+		ida_destroy(&port->in_hopids);
+		ida_destroy(&port->out_hopids);
 	}
 
 	kfree(sw->uuid);
@@ -1493,6 +1838,9 @@
 		return 3;
 
 	default:
+		if (tb_switch_is_usb4(sw))
+			return 4;
+
 		/*
 		 * For unknown switches assume generation to be 1 to be
 		 * on the safe side.
@@ -1503,6 +1851,19 @@
 	}
 }
 
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+	int max_depth;
+
+	if (tb_switch_is_usb4(sw) ||
+	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+		max_depth = USB4_SWITCH_MAX_DEPTH;
+	else
+		max_depth = TB_SWITCH_MAX_DEPTH;
+
+	return depth > max_depth;
+}
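
The two limits compared here are defined in tb.h rather than in this
hunk; presumably:

	/* Presumed values from tb.h, shown for context only */
	#define TB_SWITCH_MAX_DEPTH	6	/* Thunderbolt 1-3 daisy chains */
	#define USB4_SWITCH_MAX_DEPTH	5	/* USB4 topologies */

Note the mixed-topology rule: if either the router itself or the root
router is USB4, the stricter USB4 limit applies.
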
+
 /**
  * tb_switch_alloc() - allocate a switch
  * @tb: Pointer to the owning domain
@@ -1524,10 +1885,16 @@
 	int upstream_port;
 	int i, ret, depth;
 
-	/* Make sure we do not exceed maximum topology limit */
+	/* Unlock the downstream port so we can access the switch below */
+	if (route) {
+		struct tb_switch *parent_sw = tb_to_switch(parent);
+		struct tb_port *down;
+
+		down = tb_port_at(route, parent_sw);
+		tb_port_unlock(down);
+	}
+
 	depth = tb_route_length(route);
-	if (depth > TB_SWITCH_MAX_DEPTH)
-		return ERR_PTR(-EADDRNOTAVAIL);
 
 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
 	if (upstream_port < 0)
@@ -1542,8 +1909,10 @@
 	if (ret)
 		goto err_free_sw_ports;
 
+	sw->generation = tb_switch_get_generation(sw);
+
 	tb_dbg(tb, "current switch config:\n");
-	tb_dump_switch(tb, &sw->config);
+	tb_dump_switch(tb, sw);
 
 	/* configure switch */
 	sw->config.upstream_port_number = upstream_port;
@@ -1552,6 +1921,12 @@
 	sw->config.route_lo = lower_32_bits(route);
 	sw->config.enabled = 0;
 
+	/* Make sure we do not exceed maximum topology limit */
+	if (tb_switch_exceeds_max_depth(sw, depth)) {
+		ret = -EADDRNOTAVAIL;
+		goto err_free_sw_ports;
+	}
+
 	/* initialize ports */
 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
 				GFP_KERNEL);
@@ -1564,16 +1939,17 @@
 		/* minimum setup for tb_find_cap and tb_drom_read to work */
 		sw->ports[i].sw = sw;
 		sw->ports[i].port = i;
-	}
 
-	sw->generation = tb_switch_get_generation(sw);
+		/* Control port does not need HopID allocation */
+		if (i) {
+			ida_init(&sw->ports[i].in_hopids);
+			ida_init(&sw->ports[i].out_hopids);
+		}
+	}
 
 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
-	if (ret < 0) {
-		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
-		goto err_free_sw_ports;
-	}
-	sw->cap_plug_events = ret;
+	if (ret > 0)
+		sw->cap_plug_events = ret;
 
 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
 	if (ret > 0)
@@ -1644,7 +2020,8 @@
  *
  * Call this function before the switch is added to the system. It will
  * upload configuration to the switch and makes it available for the
- * connection manager to use.
+ * connection manager to use. Can be called again after resume from
+ * low power states to re-initialize the switch.
  *
  * Return: %0 in case of success and negative errno in case of failure
  */
@@ -1655,21 +2032,42 @@
 	int ret;
 
 	route = tb_route(sw);
-	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
-	       route, tb_route_length(route), sw->config.upstream_port_number);
 
-	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
-		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
-			   sw->config.vendor_id);
+	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+	       sw->config.enabled ? "restoring" : "initializing", route,
+	       tb_route_length(route), sw->config.upstream_port_number);
 
 	sw->config.enabled = 1;
 
-	/* upload configuration */
-	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
-	if (ret)
-		return ret;
+	if (tb_switch_is_usb4(sw)) {
+		/*
+		 * For USB4 devices, we need to program the CM version
+		 * accordingly so that it knows to expose all the
+		 * additional capabilities.
+		 */
+		sw->config.cmuv = USB4_VERSION_1_0;
 
-	ret = tb_lc_configure_link(sw);
+		/* Enumerate the switch */
+		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+				  ROUTER_CS_1, 4);
+		if (ret)
+			return ret;
+
+		ret = usb4_switch_setup(sw);
+	} else {
+		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+				   sw->config.vendor_id);
+
+		if (!sw->cap_plug_events) {
+			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+			return -ENODEV;
+		}
+
+		/* Enumerate the switch */
+		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+				  ROUTER_CS_1, 3);
+	}
 	if (ret)
 		return ret;
 
@@ -1678,18 +2076,32 @@
 
 static int tb_switch_set_uuid(struct tb_switch *sw)
 {
+	bool uid = false;
 	u32 uuid[4];
 	int ret;
 
 	if (sw->uuid)
 		return 0;
 
-	/*
-	 * The newer controllers include fused UUID as part of link
-	 * controller specific registers
-	 */
-	ret = tb_lc_read_uuid(sw, uuid);
-	if (ret) {
+	if (tb_switch_is_usb4(sw)) {
+		ret = usb4_switch_read_uid(sw, &sw->uid);
+		if (ret)
+			return ret;
+		uid = true;
+	} else {
+		/*
+		 * The newer controllers include fused UUID as part of
+		 * link controller specific registers
+		 */
+		ret = tb_lc_read_uuid(sw, uuid);
+		if (ret) {
+			if (ret != -EINVAL)
+				return ret;
+			uid = true;
+		}
+	}
+
+	if (uid) {
 		/*
 		 * ICM generates UUID based on UID and fills the upper
 		 * two words with ones. This is not strictly following
@@ -1719,7 +2131,7 @@
 		if (tb_route(sw))
 			return 0;
 
-		/* fallthrough */
+		fallthrough;
 	case 3:
 		ret = tb_switch_set_uuid(sw);
 		if (ret)
@@ -1737,7 +2149,7 @@
 	}
 
 	/* Root switch DMA port requires running firmware */
-	if (!tb_route(sw) && sw->config.enabled)
+	if (!tb_route(sw) && !tb_switch_is_icm(sw))
 		return 0;
 
 	sw->dma_port = dma_port_alloc(sw);
@@ -1756,7 +2168,7 @@
 	nvm_get_auth_status(sw, &status);
 	if (status) {
 		if (!tb_route(sw))
-			nvm_authenticate_complete(sw);
+			nvm_authenticate_complete_dma_port(sw);
 		return 0;
 	}
 
@@ -1771,7 +2183,7 @@
 
 	/* Now we can allow root port to suspend again */
 	if (!tb_route(sw))
-		nvm_authenticate_complete(sw);
+		nvm_authenticate_complete_dma_port(sw);
 
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
@@ -1788,6 +2200,218 @@
 	return -ESHUTDOWN;
 }
 
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+	int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+		struct tb_port *subordinate;
+
+		if (!tb_port_is_null(port))
+			continue;
+
+		/* Check for the subordinate port */
+		if (i == sw->config.max_port_number ||
+		    !tb_port_is_null(&sw->ports[i + 1]))
+			continue;
+
+		/* Link them if not already linked (by the DROM) */
+		subordinate = &sw->ports[i + 1];
+		if (!port->dual_link_port && !subordinate->dual_link_port) {
+			port->link_nr = 0;
+			port->dual_link_port = subordinate;
+			subordinate->link_nr = 1;
+			subordinate->dual_link_port = port;
+
+			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+				  port->port, subordinate->port);
+		}
+	}
+}
+
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+	const struct tb_port *up = tb_upstream_port(sw);
+
+	if (!up->dual_link_port || !up->dual_link_port->remote)
+		return false;
+
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_lane_bonding_possible(sw);
+	return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+	struct tb_port *up;
+	bool change = false;
+	int ret;
+
+	if (!tb_route(sw) || tb_switch_is_icm(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+
+	ret = tb_port_get_link_speed(up);
+	if (ret < 0)
+		return ret;
+	if (sw->link_speed != ret)
+		change = true;
+	sw->link_speed = ret;
+
+	ret = tb_port_get_link_width(up);
+	if (ret < 0)
+		return ret;
+	if (sw->link_width != ret)
+		change = true;
+	sw->link_width = ret;
+
+	/* Notify userspace that there is a possible link attribute change */
+	if (device_is_registered(&sw->dev) && change)
+		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+	return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * Connection manager can call this function to enable lane bonding of a
+ * switch. If conditions are correct and both switches support the feature,
+ * lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+	struct tb_port *up, *down;
+	u64 route = tb_route(sw);
+	int ret;
+
+	if (!route)
+		return 0;
+
+	if (!tb_switch_lane_bonding_possible(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(route, parent);
+
+	if (!tb_port_is_width_supported(up, 2) ||
+	    !tb_port_is_width_supported(down, 2))
+		return 0;
+
+	ret = tb_port_lane_bonding_enable(up);
+	if (ret) {
+		tb_port_warn(up, "failed to enable lane bonding\n");
+		return ret;
+	}
+
+	ret = tb_port_lane_bonding_enable(down);
+	if (ret) {
+		tb_port_warn(down, "failed to enable lane bonding\n");
+		tb_port_lane_bonding_disable(up);
+		return ret;
+	}
+
+	tb_switch_update_link_attributes(sw);
+
+	tb_sw_dbg(sw, "lane bonding enabled\n");
+	return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+	struct tb_port *up, *down;
+
+	if (!tb_route(sw))
+		return;
+
+	up = tb_upstream_port(sw);
+	if (!up->bonded)
+		return;
+
+	down = tb_port_at(tb_route(sw), parent);
+
+	tb_port_lane_bonding_disable(up);
+	tb_port_lane_bonding_disable(down);
+
+	tb_switch_update_link_attributes(sw);
+	tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
+/**
+ * tb_switch_configure_link() - Set link configured
+ * @sw: Switch whose link is configured
+ *
+ * Sets the link upstream from @sw configured (from both ends) so that
+ * it will not be disconnected when the domain exits sleep. Can be
+ * called for any switch.
+ *
+ * It is recommended that this is called after lane bonding is enabled.
+ *
+ * Returns %0 on success and negative errno in case of error.
+ */
+int tb_switch_configure_link(struct tb_switch *sw)
+{
+	struct tb_port *up, *down;
+	int ret;
+
+	if (!tb_route(sw) || tb_switch_is_icm(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	if (tb_switch_is_usb4(up->sw))
+		ret = usb4_port_configure(up);
+	else
+		ret = tb_lc_configure_port(up);
+	if (ret)
+		return ret;
+
+	down = up->remote;
+	if (tb_switch_is_usb4(down->sw))
+		return usb4_port_configure(down);
+	return tb_lc_configure_port(down);
+}
+
+/**
+ * tb_switch_unconfigure_link() - Unconfigure link
+ * @sw: Switch whose link is unconfigured
+ *
+ * Sets the link unconfigured so the @sw will be disconnected if the
+ * domain exits sleep.
+ */
+void tb_switch_unconfigure_link(struct tb_switch *sw)
+{
+	struct tb_port *up, *down;
+
+	if (sw->is_unplugged)
+		return;
+	if (!tb_route(sw) || tb_switch_is_icm(sw))
+		return;
+
+	up = tb_upstream_port(sw);
+	if (tb_switch_is_usb4(up->sw))
+		usb4_port_unconfigure(up);
+	else
+		tb_lc_unconfigure_port(up);
+
+	down = up->remote;
+	if (tb_switch_is_usb4(down->sw))
+		usb4_port_unconfigure(down);
+	else
+		tb_lc_unconfigure_port(down);
+}
+
 /**
  * tb_switch_add() - Add a switch to the domain
  * @sw: Switch to add
@@ -1812,21 +2436,25 @@
 	 * configuration based mailbox.
 	 */
 	ret = tb_switch_add_dma_port(sw);
-	if (ret)
+	if (ret) {
+		dev_err(&sw->dev, "failed to add DMA port\n");
 		return ret;
+	}
 
 	if (!sw->safe_mode) {
 		/* read drom */
 		ret = tb_drom_read(sw);
 		if (ret) {
-			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			dev_err(&sw->dev, "reading DROM failed\n");
 			return ret;
 		}
 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
 
 		ret = tb_switch_set_uuid(sw);
-		if (ret)
+		if (ret) {
+			dev_err(&sw->dev, "failed to set UUID\n");
 			return ret;
+		}
 
 		for (i = 0; i <= sw->config.max_port_number; i++) {
 			if (sw->ports[i].disabled) {
@@ -1834,14 +2462,28 @@
 				continue;
 			}
 			ret = tb_init_port(&sw->ports[i]);
-			if (ret)
+			if (ret) {
+				dev_err(&sw->dev, "failed to initialize port %d\n", i);
 				return ret;
+			}
 		}
+
+		tb_switch_default_link_ports(sw);
+
+		ret = tb_switch_update_link_attributes(sw);
+		if (ret)
+			return ret;
+
+		ret = tb_switch_tmu_init(sw);
+		if (ret)
+			return ret;
 	}
 
 	ret = device_add(&sw->dev);
-	if (ret)
+	if (ret) {
+		dev_err(&sw->dev, "failed to add device: %d\n", ret);
 		return ret;
+	}
 
 	if (tb_route(sw)) {
 		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
@@ -1853,10 +2495,18 @@
 
 	ret = tb_switch_nvm_add(sw);
 	if (ret) {
+		dev_err(&sw->dev, "failed to add NVM devices\n");
 		device_del(&sw->dev);
 		return ret;
 	}
 
+	/*
+	 * Thunderbolt routers do not generate wakeups themselves but
+	 * they forward wakeups from tunneled protocols, so enable
+	 * wakeup support here.
+	 */
+	device_init_wakeup(&sw->dev, true);
+
 	pm_runtime_set_active(&sw->dev);
 	if (sw->rpm) {
 		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
@@ -1866,6 +2516,7 @@
 		pm_request_autosuspend(&sw->dev);
 	}
 
+	tb_switch_debugfs_init(sw);
 	return 0;
 }
 
@@ -1879,7 +2530,9 @@
  */
 void tb_switch_remove(struct tb_switch *sw)
 {
-	int i;
+	struct tb_port *port;
+
+	tb_switch_debugfs_remove(sw);
 
 	if (sw->rpm) {
 		pm_runtime_get_sync(&sw->dev);
@@ -1887,19 +2540,21 @@
 	}
 
 	/* port 0 is the switch itself and never has a remote */
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_port_has_remote(&sw->ports[i])) {
-			tb_switch_remove(sw->ports[i].remote->sw);
-			sw->ports[i].remote = NULL;
-		} else if (sw->ports[i].xdomain) {
-			tb_xdomain_remove(sw->ports[i].xdomain);
-			sw->ports[i].xdomain = NULL;
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port)) {
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else if (port->xdomain) {
+			tb_xdomain_remove(port->xdomain);
+			port->xdomain = NULL;
 		}
+
+		/* Remove any downstream retimers */
+		tb_retimer_remove_all(port);
 	}
 
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
-	tb_lc_unconfigure_link(sw);
 
 	tb_switch_nvm_remove(sw);
 
@@ -1913,7 +2568,8 @@
  */
 void tb_sw_set_unplugged(struct tb_switch *sw)
 {
-	int i;
+	struct tb_port *port;
+
 	if (sw == sw->tb->root_switch) {
 		tb_sw_WARN(sw, "cannot unplug root switch\n");
 		return;
@@ -1923,17 +2579,31 @@
 		return;
 	}
 	sw->is_unplugged = true;
-	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (tb_port_has_remote(&sw->ports[i]))
-			tb_sw_set_unplugged(sw->ports[i].remote->sw);
-		else if (sw->ports[i].xdomain)
-			sw->ports[i].xdomain->is_unplugged = true;
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_sw_set_unplugged(port->remote->sw);
+		else if (port->xdomain)
+			port->xdomain->is_unplugged = true;
 	}
 }
 
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	if (flags)
+		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+	else
+		tb_sw_dbg(sw, "disabling wakeup\n");
+
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_set_wake(sw, flags);
+	return tb_lc_set_wake(sw, flags);
+}
+
 int tb_switch_resume(struct tb_switch *sw)
 {
-	int i, err;
+	struct tb_port *port;
+	int err;
+
 	tb_sw_dbg(sw, "resuming switch\n");
 
 	/*
@@ -1954,7 +2624,10 @@
 			return err;
 		}
 
-		err = tb_drom_read_uid_only(sw, &uid);
+		if (tb_switch_is_usb4(sw))
+			err = usb4_switch_read_uid(sw, &uid);
+		else
+			err = tb_drom_read_uid_only(sw, &uid);
 		if (err) {
 			tb_sw_warn(sw, "uid read failed\n");
 			return err;
@@ -1967,23 +2640,19 @@
 		}
 	}
 
-	/* upload configuration */
-	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
+	err = tb_switch_configure(sw);
 	if (err)
 		return err;
 
-	err = tb_lc_configure_link(sw);
-	if (err)
-		return err;
+	/* Disable wakes */
+	tb_switch_set_wake(sw, 0);
 
-	err = tb_plug_events_active(sw, true);
+	err = tb_switch_tmu_init(sw);
 	if (err)
 		return err;
 
 	/* check for surviving downstream switches */
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
-
+	tb_switch_for_each_port(sw, port) {
 		if (!tb_port_has_remote(port) && !port->xdomain)
 			continue;
 
@@ -1994,8 +2663,14 @@
 				tb_sw_set_unplugged(port->remote->sw);
 			else if (port->xdomain)
 				port->xdomain->is_unplugged = true;
-		} else if (tb_port_has_remote(port)) {
-			if (tb_switch_resume(port->remote->sw)) {
+		} else if (tb_port_has_remote(port) || port->xdomain) {
+			/*
+			 * Always unlock the port so the downstream
+			 * switch/domain is accessible.
+			 */
+			if (tb_port_unlock(port))
+				tb_port_warn(port, "failed to unlock port\n");
+			if (port->remote && tb_switch_resume(port->remote->sw)) {
 				tb_port_warn(port,
 					     "lost during suspend, disconnecting\n");
 				tb_sw_set_unplugged(port->remote->sw);
@@ -2005,19 +2680,100 @@
 	return 0;
 }
 
-void tb_switch_suspend(struct tb_switch *sw)
+/**
+ * tb_switch_suspend() - Put a switch to sleep
+ * @sw: Switch to suspend
+ * @runtime: Is this runtime suspend or system sleep
+ *
+ * Suspends the router and all its children. Enables wakes according to
+ * the value of @runtime and then sets the sleep bit for the router. If
+ * @sw is the host router, the domain is ready to go to sleep once this
+ * function returns.
+ */
+void tb_switch_suspend(struct tb_switch *sw, bool runtime)
 {
-	int i, err;
+	unsigned int flags = 0;
+	struct tb_port *port;
+	int err;
+
+	tb_sw_dbg(sw, "suspending switch\n");
+
 	err = tb_plug_events_active(sw, false);
 	if (err)
 		return;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_port_has_remote(&sw->ports[i]))
-			tb_switch_suspend(sw->ports[i].remote->sw);
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_switch_suspend(port->remote->sw, runtime);
 	}
 
-	tb_lc_set_sleep(sw);
+	if (runtime) {
+		/* Trigger wake when something is plugged in/out */
+		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
+		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+	} else if (device_may_wakeup(&sw->dev)) {
+		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+	}
+
+	tb_switch_set_wake(sw, flags);
+
+	if (tb_switch_is_usb4(sw))
+		usb4_switch_set_sleep(sw);
+	else
+		tb_lc_set_sleep(sw);
+}
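
The flags handed to tb_switch_set_wake() form a bitmask: runtime suspend
arms plug/unplug events plus all tunneled-protocol wakes so any activity
resumes the domain, while system sleep arms the protocol wakes only if
the device is allowed to wake the system. A sketch of the presumed
definitions from tb.h (the exact bit positions are an assumption):

	#include <linux/bits.h>

	#define TB_WAKE_ON_CONNECT	BIT(0)
	#define TB_WAKE_ON_DISCONNECT	BIT(1)
	#define TB_WAKE_ON_USB4		BIT(2)
	#define TB_WAKE_ON_USB3		BIT(3)
	#define TB_WAKE_ON_PCIE		BIT(4)
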
+
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of DP resource for DP tunneling using
+ * switch-specific means. Returns %true if the resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_query_dp_resource(sw, in);
+	return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_alloc_dp_resource(sw, in);
+	return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	int ret;
+
+	if (tb_switch_is_usb4(sw))
+		ret = usb4_switch_dealloc_dp_resource(sw, in);
+	else
+		ret = tb_lc_dp_sink_dealloc(sw, in);
+
+	if (ret)
+		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+			   in->port);
 }
 
 struct tb_sw_lookup {
@@ -2130,7 +2886,20 @@
 	return NULL;
 }
 
-void tb_switch_exit(void)
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+				    enum tb_port_type type)
 {
-	ida_destroy(&nvm_ida);
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (port->config.type == type)
+			return port;
+	}
+
+	return NULL;
 }
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1f7a9e1..a56ea54 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,7 +9,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
+#include <linux/pm_runtime.h>
 
 #include "tb.h"
 #include "tb_regs.h"
@@ -18,16 +18,26 @@
 /**
  * struct tb_cm - Simple Thunderbolt connection manager
  * @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
  * @hotplug_active: tb_handle_hotplug will stop progressing plug
  *		    events and exit if this is not set (it needs to
  *		    acquire the lock one more time). Used to drain wq
  *		    after cfg has been paused.
+ * @remove_work: Work used to remove any unplugged routers after
+ *		 runtime resume
  */
 struct tb_cm {
 	struct list_head tunnel_list;
+	struct list_head dp_resources;
 	bool hotplug_active;
+	struct delayed_work remove_work;
 };
 
+static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
+{
+	return ((void *)tcm - sizeof(struct tb));
+}
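
tcm_to_tb() is the inverse of tb_priv(): the connection-manager private
area is assumed to be allocated immediately after struct tb, so stepping
back by sizeof(struct tb) recovers the domain pointer. A sketch of the
assumed layout:

	/*
	 * Assumed layout (see tb_domain_alloc() and tb_priv() in the core):
	 *
	 *	struct tb {
	 *		...
	 *		unsigned long privdata[];	// CM private data follows
	 *	};
	 *
	 * tb_priv(tb) returns tb->privdata, i.e. (void *)tb + sizeof(struct tb),
	 * and the subtraction in tcm_to_tb() simply undoes it.
	 */
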
+
 struct tb_hotplug_event {
 	struct work_struct work;
 	struct tb *tb;
@@ -56,17 +66,51 @@
 
 /* enumeration & hot plug handling */
 
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+	struct tb_cm *tcm = tb_priv(sw->tb);
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_is_dpin(port))
+			continue;
+
+		if (!tb_switch_query_dp_resource(sw, port))
+			continue;
+
+		list_add_tail(&port->list, &tcm->dp_resources);
+		tb_port_dbg(port, "DP IN resource available\n");
+	}
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+	struct tb_cm *tcm = tb_priv(sw->tb);
+	struct tb_port *port, *tmp;
+
+	/* Clear children resources first */
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_remove_dp_resources(port->remote->sw);
+	}
+
+	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+		if (port->sw == sw) {
+			tb_port_dbg(port, "DP OUT resource unavailable\n");
+			list_del_init(&port->list);
+		}
+	}
+}
+
 static void tb_discover_tunnels(struct tb_switch *sw)
 {
 	struct tb *tb = sw->tb;
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_port *port;
-	int i;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
+	tb_switch_for_each_port(sw, port) {
 		struct tb_tunnel *tunnel = NULL;
 
-		port = &sw->ports[i];
 		switch (port->config.type) {
 		case TB_TYPE_DP_HDMI_IN:
 			tunnel = tb_tunnel_discover_dp(tb, port);
@@ -76,6 +120,10 @@
 			tunnel = tb_tunnel_discover_pci(tb, port);
 			break;
 
+		case TB_TYPE_USB3_DOWN:
+			tunnel = tb_tunnel_discover_usb3(tb, port);
+			break;
+
 		default:
 			break;
 		}
@@ -90,17 +138,44 @@
 				parent->boot = true;
 				parent = tb_switch_parent(parent);
 			}
+		} else if (tb_tunnel_is_dp(tunnel)) {
+			/* Keep the domain from powering down */
+			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
+			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
 		}
 
 		list_add_tail(&tunnel->list, &tcm->tunnel_list);
 	}
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_port_has_remote(&sw->ports[i]))
-			tb_discover_tunnels(sw->ports[i].remote->sw);
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port))
+			tb_discover_tunnels(port->remote->sw);
 	}
 }
 
+static int tb_port_configure_xdomain(struct tb_port *port)
+{
+	/*
+	 * XDomain paths currently only support single lane so we must
+	 * disable the other lane according to USB4 spec.
+	 */
+	tb_port_disable(port->dual_link_port);
+
+	if (tb_switch_is_usb4(port->sw))
+		return usb4_port_configure_xdomain(port);
+	return tb_lc_configure_xdomain(port);
+}
+
+static void tb_port_unconfigure_xdomain(struct tb_port *port)
+{
+	if (tb_switch_is_usb4(port->sw))
+		usb4_port_unconfigure_xdomain(port);
+	else
+		tb_lc_unconfigure_xdomain(port);
+
+	tb_port_enable(port->dual_link_port);
+}
+
 static void tb_scan_xdomain(struct tb_port *port)
 {
 	struct tb_switch *sw = port->sw;
@@ -119,10 +194,342 @@
 			      NULL);
 	if (xd) {
 		tb_port_at(route, sw)->xdomain = xd;
+		tb_port_configure_xdomain(port);
 		tb_xdomain_add(xd);
 	}
 }
 
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+	int ret;
+
+	/* If it is already enabled in the correct mode, don't touch it */
+	if (tb_switch_tmu_is_enabled(sw))
+		return 0;
+
+	ret = tb_switch_tmu_disable(sw);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_tmu_post_time(sw);
+	if (ret)
+		return ret;
+
+	return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+					   enum tb_port_type type)
+{
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->config.type != type)
+			continue;
+		if (!port->cap_adap)
+			continue;
+		if (tb_port_is_enabled(port))
+			continue;
+		return port;
+	}
+	return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+					 const struct tb_port *port)
+{
+	struct tb_port *down;
+
+	down = usb4_switch_map_usb3_down(sw, port);
+	if (down && !tb_usb3_port_is_enabled(down))
+		return down;
+	return NULL;
+}
+
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+					struct tb_port *src_port,
+					struct tb_port *dst_port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+		if (tunnel->type == type &&
+		    ((src_port && src_port == tunnel->src_port) ||
+		     (dst_port && dst_port == tunnel->dst_port))) {
+			return tunnel;
+		}
+	}
+
+	return NULL;
+}
+
+static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+						   struct tb_port *src_port,
+						   struct tb_port *dst_port)
+{
+	struct tb_port *port, *usb3_down;
+	struct tb_switch *sw;
+
+	/* Pick the router that is deepest in the topology */
+	if (dst_port->sw->config.depth > src_port->sw->config.depth)
+		sw = dst_port->sw;
+	else
+		sw = src_port->sw;
+
+	/* Can't be the host router */
+	if (sw == tb->root_switch)
+		return NULL;
+
+	/* Find the downstream USB4 port that leads to this router */
+	port = tb_port_at(tb_route(sw), tb->root_switch);
+	/* Find the corresponding host router USB3 downstream port */
+	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
+	if (!usb3_down)
+		return NULL;
+
+	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+}
+
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+	struct tb_port *dst_port, int *available_up, int *available_down)
+{
+	int usb3_consumed_up, usb3_consumed_down, ret;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+	struct tb_port *port;
+
+	tb_port_dbg(dst_port, "calculating available bandwidth\n");
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (tunnel) {
+		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+						   &usb3_consumed_down);
+		if (ret)
+			return ret;
+	} else {
+		usb3_consumed_up = 0;
+		usb3_consumed_down = 0;
+	}
+
+	*available_up = *available_down = 40000;
+
+	/* Find the minimum available bandwidth over all links */
+	tb_for_each_port_on_path(src_port, dst_port, port) {
+		int link_speed, link_width, up_bw, down_bw;
+
+		if (!tb_port_is_null(port))
+			continue;
+
+		if (tb_is_upstream_port(port)) {
+			link_speed = port->sw->link_speed;
+		} else {
+			link_speed = tb_port_get_link_speed(port);
+			if (link_speed < 0)
+				return link_speed;
+		}
+
+		link_width = port->bonded ? 2 : 1;
+
+		up_bw = link_speed * link_width * 1000; /* Mb/s */
+		/* Leave 10% guard band */
+		up_bw -= up_bw / 10;
+		down_bw = up_bw;
+
+		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+
+		/*
+		 * Find all DP tunnels that cross the port and subtract
+		 * their consumed bandwidth from the available bandwidth.
+		 */
+		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+			int dp_consumed_up, dp_consumed_down;
+
+			if (!tb_tunnel_is_dp(tunnel))
+				continue;
+
+			if (!tb_tunnel_port_on_path(tunnel, port))
+				continue;
+
+			ret = tb_tunnel_consumed_bandwidth(tunnel,
+							   &dp_consumed_up,
+							   &dp_consumed_down);
+			if (ret)
+				return ret;
+
+			up_bw -= dp_consumed_up;
+			down_bw -= dp_consumed_down;
+		}
+
+		/*
+		 * If USB3 is tunneled from the host router down to the
+		 * branch leading to the port, we need to take the USB3
+		 * consumed bandwidth into account regardless of whether
+		 * it actually crosses the port.
+		 */
+		up_bw -= usb3_consumed_up;
+		down_bw -= usb3_consumed_down;
+
+		if (up_bw < *available_up)
+			*available_up = up_bw;
+		if (down_bw < *available_down)
+			*available_down = down_bw;
+	}
+
+	if (*available_up < 0)
+		*available_up = 0;
+	if (*available_down < 0)
+		*available_down = 0;
+
+	return 0;
+}
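
For concreteness, the per-link arithmetic above with illustrative
numbers (not taken from the patch): a bonded Thunderbolt 3/USB4 link has
two 20 Gb/s lanes, so

	up_bw = 20 * 2 * 1000;	/* link_speed * link_width * 1000 = 40000 Mb/s */
	up_bw -= up_bw / 10;	/* minus the 10% guard band -> 36000 Mb/s */

which is also why *available_up and *available_down start from a
40000 Mb/s ceiling and can only shrink as the DP tunnels crossing the
port and the branch's USB3 tunnel are subtracted.
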
+
+static int tb_release_unused_usb3_bandwidth(struct tb *tb,
+					    struct tb_port *src_port,
+					    struct tb_port *dst_port)
+{
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
+}
+
+static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
+				      struct tb_port *dst_port)
+{
+	int ret, available_up, available_down;
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+	if (!tunnel)
+		return;
+
+	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+
+	/*
+	 * Calculate available bandwidth for the first hop USB3 tunnel.
+	 * That determines the whole USB3 bandwidth for this branch.
+	 */
+	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+				     &available_up, &available_down);
+	if (ret) {
+		tb_warn(tb, "failed to calculate available bandwidth\n");
+		return;
+	}
+
+	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
+	       available_up, available_down);
+
+	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	int ret, available_up, available_down;
+	struct tb_port *up, *down, *port;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+	if (!up)
+		return 0;
+
+	if (!sw->link_usb4)
+		return 0;
+
+	/*
+	 * Look up available down port. Since we are chaining, it should
+	 * be found right above this switch.
+	 */
+	port = tb_port_at(tb_route(sw), parent);
+	down = tb_find_usb3_down(parent, port);
+	if (!down)
+		return 0;
+
+	if (tb_route(parent)) {
+		struct tb_port *parent_up;
+		/*
+		 * Check first that the parent switch has its upstream USB3
+		 * port enabled. Otherwise the chain is not complete and
+		 * there is no point setting up a new tunnel.
+		 */
+		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+		if (!parent_up || !tb_port_is_enabled(parent_up))
+			return 0;
+
+		/* Make all unused bandwidth available for the new tunnel */
+		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
+		if (ret)
+			return ret;
+	}
+
+	ret = tb_available_bandwidth(tb, down, up, &available_up,
+				     &available_down);
+	if (ret)
+		goto err_reclaim;
+
+	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
+		    available_up, available_down);
+
+	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
+				      available_down);
+	if (!tunnel) {
+		ret = -ENOMEM;
+		goto err_reclaim;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(up,
+			     "USB3 tunnel activation failed, aborting\n");
+		ret = -EIO;
+		goto err_free;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
+	return 0;
+
+err_free:
+	tb_tunnel_free(tunnel);
+err_reclaim:
+	if (tb_route(parent))
+		tb_reclaim_usb3_bandwidth(tb, down, up);
+
+	return ret;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	int ret;
+
+	if (tb_route(sw)) {
+		ret = tb_tunnel_usb3(sw->tb, sw);
+		if (ret)
+			return ret;
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
+			continue;
+		ret = tb_create_usb3_tunnels(port->remote->sw);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static void tb_scan_port(struct tb_port *port);
 
 /**
@@ -130,9 +537,15 @@
  */
 static void tb_scan_switch(struct tb_switch *sw)
 {
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++)
-		tb_scan_port(&sw->ports[i]);
+	struct tb_port *port;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	tb_switch_for_each_port(sw, port)
+		tb_scan_port(port);
+
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
 }
 
 /**
@@ -168,6 +581,9 @@
 		tb_port_dbg(port, "port already has a remote\n");
 		return;
 	}
+
+	tb_retimer_scan(port);
+
 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
 			     tb_downstream_route(port));
 	if (IS_ERR(sw)) {
@@ -192,6 +608,7 @@
 	 */
 	if (port->xdomain) {
 		tb_xdomain_remove(port->xdomain);
+		tb_port_unconfigure_xdomain(port);
 		port->xdomain = NULL;
 	}
 
@@ -203,6 +620,12 @@
 	if (!tcm->hotplug_active)
 		dev_set_uevent_suppress(&sw->dev, true);
 
+	/*
+	 * At the moment we can support runtime PM only for Thunderbolt 2
+	 * and beyond (devices with a link controller).
+	 */
+	sw->rpm = sw->generation > 1;
+
 	if (tb_switch_add(sw)) {
 		tb_switch_put(sw);
 		return;
@@ -217,27 +640,72 @@
 		upstream_port->dual_link_port->remote = port->dual_link_port;
 	}
 
+	/* Enable lane bonding if supported */
+	tb_switch_lane_bonding_enable(sw);
+	/* Set the link configured */
+	tb_switch_configure_link(sw);
+
+	if (tb_enable_tmu(sw))
+		tb_sw_warn(sw, "failed to enable TMU\n");
+
+	/* Scan upstream retimers */
+	tb_retimer_scan(upstream_port);
+
+	/*
+	 * Create USB 3.x tunnels only when the switch is plugged into the
+	 * domain. This is because we scan the domain also during discovery
+	 * and want to discover existing USB 3.x tunnels before we create
+	 * any new ones.
+	 */
+	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
+	tb_add_dp_resources(sw);
 	tb_scan_switch(sw);
 }
 
-static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
-			  struct tb_port *src_port, struct tb_port *dst_port)
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 {
-	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_tunnel *tunnel;
+	struct tb_port *src_port, *dst_port;
+	struct tb *tb;
 
-	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
-		if (tunnel->type == type &&
-		    ((src_port && src_port == tunnel->src_port) ||
-		     (dst_port && dst_port == tunnel->dst_port))) {
-			tb_tunnel_deactivate(tunnel);
-			list_del(&tunnel->list);
-			tb_tunnel_free(tunnel);
-			return 0;
-		}
+	if (!tunnel)
+		return;
+
+	tb_tunnel_deactivate(tunnel);
+	list_del(&tunnel->list);
+
+	tb = tunnel->tb;
+	src_port = tunnel->src_port;
+	dst_port = tunnel->dst_port;
+
+	switch (tunnel->type) {
+	case TB_TUNNEL_DP:
+		/*
+		 * In case of DP tunnel make sure the DP IN resource is
+		 * deallocated properly.
+		 */
+		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+		/* Now we can allow the domain to runtime suspend again */
+		pm_runtime_mark_last_busy(&dst_port->sw->dev);
+		pm_runtime_put_autosuspend(&dst_port->sw->dev);
+		pm_runtime_mark_last_busy(&src_port->sw->dev);
+		pm_runtime_put_autosuspend(&src_port->sw->dev);
+		fallthrough;
+
+	case TB_TUNNEL_USB3:
+		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
+		break;
+
+	default:
+		/*
+		 * PCIe and DMA tunnels do not consume guaranteed
+		 * bandwidth.
+		 */
+		break;
 	}
 
-	return -ENODEV;
+	tb_tunnel_free(tunnel);
 }
 
 /**
@@ -250,11 +718,8 @@
 	struct tb_tunnel *n;
 
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
-		if (tb_tunnel_is_invalid(tunnel)) {
-			tb_tunnel_deactivate(tunnel);
-			list_del(&tunnel->list);
-			tb_tunnel_free(tunnel);
-		}
+		if (tb_tunnel_is_invalid(tunnel))
+			tb_deactivate_and_free_tunnel(tunnel);
 	}
 }
 
@@ -263,14 +728,17 @@
  */
 static void tb_free_unplugged_children(struct tb_switch *sw)
 {
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
+	struct tb_port *port;
 
+	tb_switch_for_each_port(sw, port) {
 		if (!tb_port_has_remote(port))
 			continue;
 
 		if (port->remote->sw->is_unplugged) {
+			tb_retimer_remove_all(port);
+			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
+			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
 			if (port->dual_link_port)
@@ -281,54 +749,18 @@
 	}
 }
 
-/**
- * tb_find_port() - return the first port of @type on @sw or NULL
- * @sw: Switch to find the port from
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_port(struct tb_switch *sw,
-				    enum tb_port_type type)
-{
-	int i;
-	for (i = 1; i <= sw->config.max_port_number; i++)
-		if (sw->ports[i].config.type == type)
-			return &sw->ports[i];
-	return NULL;
-}
-
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
-					   enum tb_port_type type)
-{
-	int i;
-
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&sw->ports[i]))
-			continue;
-		if (sw->ports[i].config.type != type)
-			continue;
-		if (!sw->ports[i].cap_adap)
-			continue;
-		if (tb_port_is_enabled(&sw->ports[i]))
-			continue;
-		return &sw->ports[i];
-	}
-	return NULL;
-}
-
 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
 					 const struct tb_port *port)
 {
+	struct tb_port *down = NULL;
+
 	/*
 	 * To keep plugging devices consistently in the same PCIe
-	 * hierarchy, do mapping here for root switch downstream PCIe
-	 * ports.
+	 * hierarchy, do mapping here for switch downstream PCIe ports.
 	 */
-	if (!tb_route(sw)) {
+	if (tb_switch_is_usb4(sw)) {
+		down = usb4_switch_map_pcie_down(sw, port);
+	} else if (!tb_route(sw)) {
 		int phy_port = tb_phy_port_from_link(port->port);
 		int index;
 
@@ -336,64 +768,242 @@
 		 * Hard-coded Thunderbolt port to PCIe down port mapping
 		 * per controller.
 		 */
-		if (tb_switch_is_cr(sw))
+		if (tb_switch_is_cactus_ridge(sw) ||
+		    tb_switch_is_alpine_ridge(sw))
 			index = !phy_port ? 6 : 7;
-		else if (tb_switch_is_fr(sw))
+		else if (tb_switch_is_falcon_ridge(sw))
 			index = !phy_port ? 6 : 8;
+		else if (tb_switch_is_titan_ridge(sw))
+			index = !phy_port ? 8 : 9;
 		else
 			goto out;
 
 		/* Validate the hard-coding */
 		if (WARN_ON(index > sw->config.max_port_number))
 			goto out;
-		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+
+		down = &sw->ports[index];
+	}
+
+	if (down) {
+		if (WARN_ON(!tb_port_is_pcie_down(down)))
 			goto out;
-		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+		if (tb_pci_port_is_enabled(down))
 			goto out;
 
-		return &sw->ports[index];
+		return down;
 	}
 
 out:
 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
 }
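
The hard-coded mapping above, summarized from the code (PCIe down
adapter index selected for each physical port):

	/*
	 * Controller             phy_port 0    phy_port 1
	 * Cactus/Alpine Ridge         6             7
	 * Falcon Ridge                6             8
	 * Titan Ridge                 8             9
	 */
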
 
-static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
 {
+	struct tb_port *host_port, *port;
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_switch *sw = out->sw;
+
+	host_port = tb_route(in->sw) ?
+		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
+
+	list_for_each_entry(port, &tcm->dp_resources, list) {
+		if (!tb_port_is_dpout(port))
+			continue;
+
+		if (tb_port_is_enabled(port)) {
+			tb_port_dbg(port, "in use\n");
+			continue;
+		}
+
+		tb_port_dbg(port, "DP OUT available\n");
+
+		/*
+		 * Keep the DP tunnel under the topology starting from
+		 * the same host router downstream port.
+		 */
+		if (host_port && tb_route(port->sw)) {
+			struct tb_port *p;
+
+			p = tb_port_at(tb_route(port->sw), tb->root_switch);
+			if (p != host_port)
+				continue;
+		}
+
+		return port;
+	}
+
+	return NULL;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+	int available_up, available_down, ret;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *port, *in, *out;
 	struct tb_tunnel *tunnel;
-	struct tb_port *in;
 
-	if (tb_port_is_enabled(out))
-		return 0;
+	/*
+	 * Find pair of inactive DP IN and DP OUT adapters and then
+	 * establish a DP tunnel between them.
+	 */
+	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
 
-	do {
-		sw = tb_to_switch(sw->dev.parent);
-		if (!sw)
-			return 0;
-		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
-	} while (!in);
+	in = NULL;
+	out = NULL;
+	list_for_each_entry(port, &tcm->dp_resources, list) {
+		if (!tb_port_is_dpin(port))
+			continue;
 
-	tunnel = tb_tunnel_alloc_dp(tb, in, out);
+		if (tb_port_is_enabled(port)) {
+			tb_port_dbg(port, "in use\n");
+			continue;
+		}
+
+		tb_port_dbg(port, "DP IN available\n");
+
+		out = tb_find_dp_out(tb, port);
+		if (out) {
+			in = port;
+			break;
+		}
+	}
+
+	if (!in) {
+		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+		return;
+	}
+	if (!out) {
+		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+		return;
+	}
+
+	/*
+	 * The DP stream needs the domain to be active, so runtime-resume
+	 * both ends of the tunnel.
+	 *
+	 * This should bring the routers in the middle active as well
+	 * and keeps the domain from runtime suspending while the DP
+	 * tunnel is active.
+	 */
+	pm_runtime_get_sync(&in->sw->dev);
+	pm_runtime_get_sync(&out->sw->dev);
+
+	if (tb_switch_alloc_dp_resource(in->sw, in)) {
+		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+		goto err_rpm_put;
+	}
+
+	/* Make all unused USB3 bandwidth available for the new DP tunnel */
+	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
+	if (ret) {
+		tb_warn(tb, "failed to release unused bandwidth\n");
+		goto err_dealloc_dp;
+	}
+
+	ret = tb_available_bandwidth(tb, in, out, &available_up,
+				     &available_down);
+	if (ret)
+		goto err_reclaim;
+
+	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
+	       available_up, available_down);
+
+	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
 	if (!tunnel) {
-		tb_port_dbg(out, "DP tunnel allocation failed\n");
-		return -ENOMEM;
+		tb_port_dbg(out, "could not allocate DP tunnel\n");
+		goto err_reclaim;
 	}
 
 	if (tb_tunnel_activate(tunnel)) {
 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
-		tb_tunnel_free(tunnel);
-		return -EIO;
+		goto err_free;
 	}
 
 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
-	return 0;
+	tb_reclaim_usb3_bandwidth(tb, in, out);
+	return;
+
+err_free:
+	tb_tunnel_free(tunnel);
+err_reclaim:
+	tb_reclaim_usb3_bandwidth(tb, in, out);
+err_dealloc_dp:
+	tb_switch_dealloc_dp_resource(in->sw, in);
+err_rpm_put:
+	pm_runtime_mark_last_busy(&out->sw->dev);
+	pm_runtime_put_autosuspend(&out->sw->dev);
+	pm_runtime_mark_last_busy(&in->sw->dev);
+	pm_runtime_put_autosuspend(&in->sw->dev);
 }
 
-static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
 {
-	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	if (tb_port_is_dpin(port)) {
+		tb_port_dbg(port, "DP IN resource unavailable\n");
+		in = port;
+		out = NULL;
+	} else {
+		tb_port_dbg(port, "DP OUT resource unavailable\n");
+		in = NULL;
+		out = port;
+	}
+
+	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+	tb_deactivate_and_free_tunnel(tunnel);
+	list_del_init(&port->list);
+
+	/*
+	 * See if there is another DP OUT port that can be used to
+	 * create another tunnel.
+	 */
+	tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *p;
+
+	if (tb_port_is_enabled(port))
+		return;
+
+	list_for_each_entry(p, &tcm->dp_resources, list) {
+		if (p == port)
+			return;
+	}
+
+	tb_port_dbg(port, "DP %s resource available\n",
+		    tb_port_is_dpin(port) ? "IN" : "OUT");
+	list_add_tail(&port->list, &tcm->dp_resources);
+
+	/* Look for suitable DP IN <-> DP OUT pairs now */
+	tb_tunnel_dp(tb);
+}
+
+static void tb_disconnect_and_release_dp(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel, *n;
+
+	/*
+	 * Tear down all DP tunnels and release their resources. They
+	 * will be re-established after resume based on plug events.
+	 */
+	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
+		if (tb_tunnel_is_dp(tunnel))
+			tb_deactivate_and_free_tunnel(tunnel);
+	}
+
+	while (!list_empty(&tcm->dp_resources)) {
+		struct tb_port *port;
+
+		port = list_first_entry(&tcm->dp_resources,
+					struct tb_port, list);
+		list_del_init(&port->list);
+	}
 }
 
 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
@@ -403,7 +1013,7 @@
 	struct tb_switch *parent_sw;
 	struct tb_tunnel *tunnel;
 
-	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
 	if (!up)
 		return 0;
 
@@ -441,7 +1051,7 @@
 
 	sw = tb_to_switch(xd->dev.parent);
 	dst_port = tb_port_at(xd->route, sw);
-	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
 
 	mutex_lock(&tb->lock);
 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
@@ -468,6 +1078,7 @@
 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 {
 	struct tb_port *dst_port;
+	struct tb_tunnel *tunnel;
 	struct tb_switch *sw;
 
 	sw = tb_to_switch(xd->dev.parent);
@@ -478,7 +1089,8 @@
 	 * case of cable disconnect) so it is fine if we cannot find it
 	 * here anymore.
 	 */
-	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+	tb_deactivate_and_free_tunnel(tunnel);
 }
 
 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
@@ -505,6 +1117,10 @@
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
 	struct tb_port *port;
+
+	/* Bring the domain back from sleep if it was suspended */
+	pm_runtime_get_sync(&tb->dev);
+
 	mutex_lock(&tb->lock);
 	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
@@ -528,15 +1144,26 @@
 		       ev->route, ev->port, ev->unplug);
 		goto put_sw;
 	}
+
+	pm_runtime_get_sync(&sw->dev);
+
 	if (ev->unplug) {
+		tb_retimer_remove_all(port);
+
 		if (tb_port_has_remote(port)) {
 			tb_port_dbg(port, "switch unplugged\n");
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
+			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_tmu_disable(port->remote->sw);
+			tb_switch_unconfigure_link(port->remote->sw);
+			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
 			if (port->dual_link_port)
 				port->dual_link_port->remote = NULL;
+			/* Maybe we can create another DP tunnel */
+			tb_tunnel_dp(tb);
 		} else if (port->xdomain) {
 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
 
@@ -553,8 +1180,9 @@
 			port->xdomain = NULL;
 			__tb_disconnect_xdomain_paths(tb, xd);
 			tb_xdomain_put(xd);
-		} else if (tb_port_is_dpout(port)) {
-			tb_teardown_dp(tb, port);
+			tb_port_unconfigure_xdomain(port);
+		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+			tb_dp_resource_unavailable(tb, port);
 		} else {
 			tb_port_dbg(port,
 				   "got unplug event for disconnected port, ignoring\n");
@@ -567,15 +1195,22 @@
 			tb_scan_port(port);
 			if (!port->remote)
 				tb_port_dbg(port, "hotplug: no switch found\n");
-		} else if (tb_port_is_dpout(port)) {
-			tb_tunnel_dp(tb, port);
+		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+			tb_dp_resource_available(tb, port);
 		}
 	}
 
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
 put_sw:
 	tb_switch_put(sw);
 out:
 	mutex_unlock(&tb->lock);
+
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_put_autosuspend(&tb->dev);
+
 	kfree(ev);
 }
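 
The hotplug handler above brackets its work with runtime PM references
on both the domain and the router. A minimal sketch of that pattern,
assuming a device registered with autosuspend enabled (the function
itself is hypothetical, not part of this patch):

#include <linux/pm_runtime.h>

static void example_do_hw_work(struct device *dev)
{
	/* Resume the device (and its suspended parents) synchronously */
	pm_runtime_get_sync(dev);

	/* ... access the hardware here ... */

	/* Drop the reference but let autosuspend delay the power-down */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
 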
 
@@ -597,8 +1232,7 @@
 
 	route = tb_cfg_get_route(&pkg->header);
 
-	if (tb_cfg_error(tb->ctl, route, pkg->port,
-			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
 			pkg->port);
 	}
@@ -612,6 +1246,7 @@
 	struct tb_tunnel *tunnel;
 	struct tb_tunnel *n;
 
+	cancel_delayed_work(&tcm->remove_work);
 	/* tunnels are only present after everything has been initialized */
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		/*
@@ -663,6 +1298,8 @@
 	 * root switch.
 	 */
 	tb->root_switch->no_nvm_upgrade = true;
+	/* All USB4 routers support runtime PM */
+	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
 
 	ret = tb_switch_configure(tb->root_switch);
 	if (ret) {
@@ -677,10 +1314,19 @@
 		return ret;
 	}
 
+	/* Enable TMU if it is off */
+	tb_switch_tmu_enable(tb->root_switch);
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	/* Find out tunnels created by the boot firmware */
 	tb_discover_tunnels(tb->root_switch);
+	/*
+	 * If the boot firmware did not create USB 3.x tunnels, create
+	 * them now for the whole topology.
+	 */
+	tb_create_usb3_tunnels(tb->root_switch);
+	/* Add DP IN resources for the root switch */
+	tb_add_dp_resources(tb->root_switch);
 	/* Make the discovered switches available to the userspace */
 	device_for_each_child(&tb->root_switch->dev, NULL,
 			      tb_scan_finalize_switch);
@@ -695,13 +1341,40 @@
 	struct tb_cm *tcm = tb_priv(tb);
 
 	tb_dbg(tb, "suspending...\n");
-	tb_switch_suspend(tb->root_switch);
+	tb_disconnect_and_release_dp(tb);
+	tb_switch_suspend(tb->root_switch, false);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 	tb_dbg(tb, "suspend finished\n");
 
 	return 0;
 }
 
+static void tb_restore_children(struct tb_switch *sw)
+{
+	struct tb_port *port;
+
+	/* No need to restore if the router is already unplugged */
+	if (sw->is_unplugged)
+		return;
+
+	if (tb_enable_tmu(sw))
+		tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port) && !port->xdomain)
+			continue;
+
+		if (port->remote) {
+			tb_switch_lane_bonding_enable(port->remote->sw);
+			tb_switch_configure_link(port->remote->sw);
+
+			tb_restore_children(port->remote->sw);
+		} else if (port->xdomain) {
+			tb_port_configure_xdomain(port);
+		}
+	}
+}
+
 static int tb_resume_noirq(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
@@ -710,11 +1383,12 @@
 	tb_dbg(tb, "resuming...\n");
 
 	/* remove any pci devices the firmware might have setup */
-	tb_switch_reset(tb, 0);
+	tb_switch_reset(tb->root_switch);
 
 	tb_switch_resume(tb->root_switch);
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
+	tb_restore_children(tb->root_switch);
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
 		tb_tunnel_restart(tunnel);
 	if (!list_empty(&tcm->tunnel_list)) {
@@ -734,15 +1408,16 @@
 
 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
 {
-	int i, ret = 0;
+	struct tb_port *port;
+	int ret = 0;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
-
+	tb_switch_for_each_port(sw, port) {
 		if (tb_is_upstream_port(port))
 			continue;
 		if (port->xdomain && port->xdomain->is_unplugged) {
+			tb_retimer_remove_all(port);
 			tb_xdomain_remove(port->xdomain);
+			tb_port_unconfigure_xdomain(port);
 			port->xdomain = NULL;
 			ret++;
 		} else if (port->remote) {
@@ -753,6 +1428,22 @@
 	return ret;
 }
 
+static int tb_freeze_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	tcm->hotplug_active = false;
+	return 0;
+}
+
+static int tb_thaw_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	tcm->hotplug_active = true;
+	return 0;
+}
+
 static void tb_complete(struct tb *tb)
 {
 	/*
@@ -766,12 +1457,64 @@
 	mutex_unlock(&tb->lock);
 }
 
+static int tb_runtime_suspend(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	mutex_lock(&tb->lock);
+	tb_switch_suspend(tb->root_switch, true);
+	tcm->hotplug_active = false;
+	mutex_unlock(&tb->lock);
+
+	return 0;
+}
+
+static void tb_remove_work(struct work_struct *work)
+{
+	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
+	struct tb *tb = tcm_to_tb(tcm);
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch) {
+		tb_free_unplugged_children(tb->root_switch);
+		tb_free_unplugged_xdomains(tb->root_switch);
+	}
+	mutex_unlock(&tb->lock);
+}
+
+static int tb_runtime_resume(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel, *n;
+
+	mutex_lock(&tb->lock);
+	tb_switch_resume(tb->root_switch);
+	tb_free_invalid_tunnels(tb);
+	tb_restore_children(tb->root_switch);
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+		tb_tunnel_restart(tunnel);
+	tcm->hotplug_active = true;
+	mutex_unlock(&tb->lock);
+
+	/*
+	 * Schedule cleanup of any unplugged devices. Run this in a
+	 * separate thread to avoid possible deadlock if the device
+	 * removal runtime resumes the unplugged device.
+	 */
+	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
+	return 0;
+}
+
 static const struct tb_cm_ops tb_cm_ops = {
 	.start = tb_start,
 	.stop = tb_stop,
 	.suspend_noirq = tb_suspend_noirq,
 	.resume_noirq = tb_resume_noirq,
+	.freeze_noirq = tb_freeze_noirq,
+	.thaw_noirq = tb_thaw_noirq,
 	.complete = tb_complete,
+	.runtime_suspend = tb_runtime_suspend,
+	.runtime_resume = tb_runtime_resume,
 	.handle_event = tb_handle_event,
 	.approve_switch = tb_tunnel_pci,
 	.approve_xdomain_paths = tb_approve_xdomain_paths,
@@ -783,9 +1526,6 @@
 	struct tb_cm *tcm;
 	struct tb *tb;
 
-	if (!x86_apple_machine)
-		return NULL;
-
 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
 	if (!tb)
 		return NULL;
@@ -795,6 +1535,8 @@
 
 	tcm = tb_priv(tb);
 	INIT_LIST_HEAD(&tcm->tunnel_list);
+	INIT_LIST_HEAD(&tcm->dp_resources);
+	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
 
 	return tb;
 }
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 6407d52..8ea360b 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -18,8 +18,17 @@
 #include "ctl.h"
 #include "dma_port.h"
 
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+/* Intel specific NVM offsets */
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_FLASH_SIZE		0x45
+
 /**
- * struct tb_switch_nvm - Structure holding switch NVM information
+ * struct tb_nvm - Structure holding NVM information
+ * @dev: Owner of the NVM
  * @major: Major version number of the active NVM portion
  * @minor: Minor version number of the active NVM portion
  * @id: Identifier used with both NVM portions
@@ -29,9 +38,14 @@
  *	 the actual NVM flash device
  * @buf_data_size: Number of bytes actually consumed by the new NVM
  *		   image
- * @authenticating: The switch is authenticating the new NVM
+ * @authenticating: The device is authenticating the new NVM
+ * @flushed: The image has been flushed to the storage area
+ *
+ * The user of this structure needs to handle serialization of possible
+ * concurrent access.
  */
-struct tb_switch_nvm {
+struct tb_nvm {
+	struct device *dev;
 	u8 major;
 	u8 minor;
 	int id;
@@ -40,10 +54,44 @@
 	void *buf;
 	size_t buf_data_size;
 	bool authenticating;
+	bool flushed;
 };
 
 #define TB_SWITCH_KEY_SIZE		32
 #define TB_SWITCH_MAX_DEPTH		6
+#define USB4_SWITCH_MAX_DEPTH		5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ *			     transmission of the Delay Request TSNOS
+ *			     (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ *			       transmission of the Delay Request TSNOS on
+ *			       a Link
+ */
+enum tb_switch_tmu_rate {
+	TB_SWITCH_TMU_RATE_OFF = 0,
+	TB_SWITCH_TMU_RATE_HIFI = 16,
+	TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate related to upstream switch. In case of root
+ *	  switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ *		    related to upstream switch. Does not apply to the
+ *		    root switch.
+ */
+struct tb_switch_tmu {
+	int cap;
+	bool has_ucap;
+	enum tb_switch_tmu_rate rate;
+	bool unidirectional;
+};
 
 /**
  * struct tb_switch - a thunderbolt switch
@@ -54,6 +102,7 @@
  *	      mailbox this will hold the pointer to that (%NULL
  *	      otherwise). If set it also means the switch has
  *	      upgradeable NVM.
+ * @tmu: The switch TMU configuration
  * @tb: Pointer to the domain the switch belongs to
  * @uid: Unique ID of the switch
  * @uuid: UUID of the switch (or %NULL if not supported)
@@ -61,6 +110,9 @@
  * @device: Device ID of the switch
  * @vendor_name: Name of the vendor (or %NULL if not known)
  * @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
+ * @link_usb4: Upstream link is USB4
  * @generation: Switch Thunderbolt generation
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
  * @cap_lc: Offset to the link controller capability (%0 if not found)
@@ -73,6 +125,7 @@
  * @rpm: The switch supports runtime PM
  * @authorized: Whether the switch is authorized by user or policy
  * @security_level: Switch supported security level
+ * @debugfs_dir: Pointer to the debugfs structure
  * @key: Contains the key used to challenge the device or %NULL if not
  *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
  * @connection_id: Connection ID used with ICM messaging
@@ -81,6 +134,7 @@
  * @depth: Depth in the chain this switch is connected (ICM only)
  * @rpm_complete: Completion used to wait for runtime resume to
  *		  complete (ICM only)
+ * @quirks: Quirks used for this Thunderbolt switch
  *
  * When the switch is being added or removed to the domain (other
  * switches) you need to have domain lock held.
@@ -90,6 +144,7 @@
 	struct tb_regs_switch_header config;
 	struct tb_port *ports;
 	struct tb_dma_port *dma_port;
+	struct tb_switch_tmu tmu;
 	struct tb *tb;
 	u64 uid;
 	uuid_t *uuid;
@@ -97,24 +152,29 @@
 	u16 device;
 	const char *vendor_name;
 	const char *device_name;
+	unsigned int link_speed;
+	unsigned int link_width;
+	bool link_usb4;
 	unsigned int generation;
 	int cap_plug_events;
 	int cap_lc;
 	bool is_unplugged;
 	u8 *drom;
-	struct tb_switch_nvm *nvm;
+	struct tb_nvm *nvm;
 	bool no_nvm_upgrade;
 	bool safe_mode;
 	bool boot;
 	bool rpm;
 	unsigned int authorized;
 	enum tb_security_level security_level;
+	struct dentry *debugfs_dir;
 	u8 *key;
 	u8 connection_id;
 	u8 connection_key;
 	u8 link;
 	u8 depth;
 	struct completion rpm_complete;
+	unsigned long quirks;
 };
 
 /**
@@ -124,14 +184,18 @@
  * @remote: Remote port (%NULL if not connected)
  * @xdomain: Remote host (%NULL if not connected)
  * @cap_phy: Offset, zero if not found
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
  * @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
  * @port: Port number on switch
- * @disabled: Disabled by eeprom
+ * @disabled: Disabled by EEPROM or enabled but not implemented
+ * @bonded: true if the port is bonded (two lanes combined as one)
  * @dual_link_port: If the switch is connected using two ports, points
  *		    to the other port.
  * @link_nr: Is this primary or secondary port on the dual_link.
  * @in_hopids: Currently allocated input HopIDs
  * @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
  */
 struct tb_port {
 	struct tb_regs_port_header config;
@@ -139,13 +203,39 @@
 	struct tb_port *remote;
 	struct tb_xdomain *xdomain;
 	int cap_phy;
+	int cap_tmu;
 	int cap_adap;
+	int cap_usb4;
 	u8 port;
 	bool disabled;
+	bool bonded;
 	struct tb_port *dual_link_port;
 	u8 link_nr:1;
 	struct ida in_hopids;
 	struct ida out_hopids;
+	struct list_head list;
+};
+
+/**
+ * struct tb_retimer - Thunderbolt retimer
+ * @dev: Device for the retimer
+ * @tb: Pointer to the domain the retimer belongs to
+ * @index: Retimer index facing the router USB4 port
+ * @vendor: Vendor ID of the retimer
+ * @device: Device ID of the retimer
+ * @port: Pointer to the lane 0 adapter
+ * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
+ * @auth_status: Status of last NVM authentication
+ */
+struct tb_retimer {
+	struct device dev;
+	struct tb *tb;
+	u8 index;
+	u32 vendor;
+	u32 device;
+	struct tb_port *port;
+	struct tb_nvm *nvm;
+	u32 auth_status;
 };
 
 /**
@@ -239,7 +329,18 @@
 
 /* HopIDs 0-7 are reserved by the Thunderbolt protocol */
 #define TB_PATH_MIN_HOPID	8
-#define TB_PATH_MAX_HOPS	7
+/*
+ * Support paths from the farthest (depth 6) router to the host and back
+ * to the same level (not necessarily to the same router).
+ */
+#define TB_PATH_MAX_HOPS	(7 * 2)
+
+/* Possible wake types */
+#define TB_WAKE_ON_CONNECT	BIT(0)
+#define TB_WAKE_ON_DISCONNECT	BIT(1)
+#define TB_WAKE_ON_USB4		BIT(2)
+#define TB_WAKE_ON_USB3		BIT(3)
+#define TB_WAKE_ON_PCIE		BIT(4)
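 
These wake bits form a mask that is passed to tb_lc_set_wake() or
usb4_switch_set_wake(), both declared later in this header. A hedged
sketch of building such a mask (the helper name and the wake policy are
illustrative only):

static int example_arm_wakes(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;

	/* For system sleep also arm the protocol tunnel wakes */
	if (!runtime)
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;

	return tb_switch_is_usb4(sw) ? usb4_switch_set_wake(sw, flags) :
				       tb_lc_set_wake(sw, flags);
}
 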
 
 /**
  * struct tb_cm_ops - Connection manager specific operations vector
@@ -250,6 +351,8 @@
  * @suspend_noirq: Connection manager specific suspend_noirq
  * @resume_noirq: Connection manager specific resume_noirq
  * @suspend: Connection manager specific suspend
+ * @freeze_noirq: Connection manager specific freeze_noirq
+ * @thaw_noirq: Connection manager specific thaw_noirq
  * @complete: Connection manager specific complete
  * @runtime_suspend: Connection manager specific runtime_suspend
  * @runtime_resume: Connection manager specific runtime_resume
@@ -272,6 +375,8 @@
 	int (*suspend_noirq)(struct tb *tb);
 	int (*resume_noirq)(struct tb *tb);
 	int (*suspend)(struct tb *tb);
+	int (*freeze_noirq)(struct tb *tb);
+	int (*thaw_noirq)(struct tb *tb);
 	void (*complete)(struct tb *tb);
 	int (*runtime_suspend)(struct tb *tb);
 	int (*runtime_resume)(struct tb *tb);
@@ -365,6 +470,11 @@
 	return port && port->port && port->config.type == TB_TYPE_PORT;
 }
 
+static inline bool tb_port_is_nhi(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_NHI;
+}
+
 static inline bool tb_port_is_pcie_down(const struct tb_port *port)
 {
 	return port && port->config.type == TB_TYPE_PCIE_DOWN;
@@ -385,6 +495,16 @@
 	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
 }
 
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			     enum tb_cfg_space space, u32 offset, u32 length)
 {
@@ -399,7 +519,7 @@
 			   length);
 }
 
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
 			      enum tb_cfg_space space, u32 offset, u32 length)
 {
 	if (sw->is_unplugged)
@@ -477,11 +597,11 @@
 struct tb *tb_probe(struct tb_nhi *nhi);
 
 extern struct device_type tb_domain_type;
+extern struct device_type tb_retimer_type;
 extern struct device_type tb_switch_type;
 
 int tb_domain_init(void);
 void tb_domain_exit(void);
-void tb_switch_exit(void);
 int tb_xdomain_init(void);
 void tb_xdomain_exit(void);
 
@@ -491,6 +611,8 @@
 int tb_domain_suspend_noirq(struct tb *tb);
 int tb_domain_resume_noirq(struct tb *tb);
 int tb_domain_suspend(struct tb *tb);
+int tb_domain_freeze_noirq(struct tb *tb);
+int tb_domain_thaw_noirq(struct tb *tb);
 void tb_domain_complete(struct tb *tb);
 int tb_domain_runtime_suspend(struct tb *tb);
 int tb_domain_runtime_resume(struct tb *tb);
@@ -514,6 +636,15 @@
 	put_device(&tb->dev);
 }
 
+struct tb_nvm *tb_nvm_alloc(struct device *dev);
+int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
+int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
+		     size_t bytes);
+int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
+			  nvmem_reg_write_t reg_write);
+void tb_nvm_free(struct tb_nvm *nvm);
+void tb_nvm_exit(void);
+
 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 				  u64 route);
 struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@@ -521,15 +652,28 @@
 int tb_switch_configure(struct tb_switch *sw);
 int tb_switch_add(struct tb_switch *sw);
 void tb_switch_remove(struct tb_switch *sw);
-void tb_switch_suspend(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw, bool runtime);
 int tb_switch_resume(struct tb_switch *sw);
-int tb_switch_reset(struct tb *tb, u64 route);
+int tb_switch_reset(struct tb_switch *sw);
 void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+				    enum tb_port_type type);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
 					       u8 depth);
 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
 
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p)					\
+	for ((p) = &(sw)->ports[1];					\
+	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
+
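 
A hedged usage sketch for the iterator (the helper below is
hypothetical): it replaces open-coded loops over
&sw->ports[1..max_port_number] such as the one converted in
tb_free_unplugged_xdomains() above.

static int example_count_lane_adapters(struct tb_switch *sw)
{
	struct tb_port *port;
	int count = 0;

	tb_switch_for_each_port(sw, port) {
		/* TB_TYPE_PORT marks a lane (PHY) adapter */
		if (port->config.type == TB_TYPE_PORT)
			count++;
	}
	return count;
}
 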
 static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
 {
 	if (sw)
@@ -559,42 +703,147 @@
 	return tb_to_switch(sw->dev.parent);
 }
 
-static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
 {
-	return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+	       sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
 }
 
-static inline bool tb_switch_is_er(const struct tb_switch *sw)
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
 {
-	return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+	       sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
 }
 
-static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
-	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+			return true;
+		}
 	}
+	return false;
 }
 
-static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
 {
-	switch (sw->config.device_id) {
-	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
-	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
-		return true;
-	default:
-		return false;
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+			return true;
+		}
 	}
+	return false;
+}
+
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+			return true;
+		}
+	}
+	return false;
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+			return true;
+		}
+	}
+	return false;
+}
+
+static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+			return true;
+		}
+	}
+	return false;
+}
+
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+		case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+		case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if @sw is a USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+	return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * Call this function when there is a need to differentiate whether the
+ * ICM firmware or the software connection manager is handling @sw. It
+ * is valid to call after tb_switch_alloc() and tb_switch_configure()
+ * have been called (the latter only in the software CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+	return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_configure_link(struct tb_switch *sw);
+void tb_switch_unconfigure_link(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+	return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+	       !sw->tmu.unidirectional;
 }
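 
A hedged usage sketch (function name hypothetical): after resuming a
router the connection manager re-enables the TMU and can verify with
the helper above that it ended up in bi-directional HiFi mode.

static void example_verify_tmu(struct tb_switch *sw)
{
	if (tb_switch_tmu_enable(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");
	else if (!tb_switch_tmu_is_enabled(sw))
		tb_sw_warn(sw, "TMU is not in bi-directional HiFi mode\n");
}
 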
 
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
+int tb_port_enable(struct tb_port *port);
+int tb_port_disable(struct tb_port *port);
 int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
 void tb_port_release_in_hopid(struct tb_port *port, int hopid);
 int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -602,10 +851,30 @@
 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 				     struct tb_port *prev);
 
+/**
+ * tb_for_each_port_on_path() - Iterate over each port on path
+ * @src: Source port
+ * @dst: Destination port
+ * @p: Port used as iterator
+ *
+ * Walks over each port on path from @src to @dst.
+ */
+#define tb_for_each_port_on_path(src, dst, p)				\
+	for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);	\
+	     (p) = tb_next_port_on_path((src), (dst), (p)))
+
+int tb_port_get_link_speed(struct tb_port *port);
+
 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+int tb_port_next_cap(struct tb_port *port, unsigned int offset);
 bool tb_port_is_enabled(struct tb_port *port);
 
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
 bool tb_pci_port_is_enabled(struct tb_port *port);
 int tb_pci_port_enable(struct tb_port *port, bool enable);
 
@@ -626,14 +895,24 @@
 int tb_path_activate(struct tb_path *path);
 void tb_path_deactivate(struct tb_path *path);
 bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_port_on_path(const struct tb_path *path,
+			  const struct tb_port *port);
 
 int tb_drom_read(struct tb_switch *sw);
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
 
 int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
-int tb_lc_configure_link(struct tb_switch *sw);
-void tb_lc_unconfigure_link(struct tb_switch *sw);
+int tb_lc_configure_port(struct tb_port *port);
+void tb_lc_unconfigure_port(struct tb_port *port);
+int tb_lc_configure_xdomain(struct tb_port *port);
+void tb_lc_unconfigure_xdomain(struct tb_port *port);
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
 int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_force_power(struct tb_switch *sw);
 
 static inline int tb_route_length(u64 route)
 {
@@ -663,4 +942,102 @@
 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
 						 u8 depth);
 
+int tb_retimer_scan(struct tb_port *port);
+void tb_retimer_remove_all(struct tb_port *port);
+
+static inline bool tb_is_retimer(const struct device *dev)
+{
+	return dev->type == &tb_retimer_type;
+}
+
+static inline struct tb_retimer *tb_to_retimer(struct device *dev)
+{
+	if (tb_is_retimer(dev))
+		return container_of(dev, struct tb_retimer, dev);
+	return NULL;
+}
+
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+			  size_t size);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+			 size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+			  const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+					  const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+					  const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
+int usb4_port_configure(struct tb_port *port);
+void usb4_port_unconfigure(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port);
+void usb4_port_unconfigure_xdomain(struct tb_port *port);
+int usb4_port_enumerate_retimers(struct tb_port *port);
+
+int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+			   u8 size);
+int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+			    const void *buf, u8 size);
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
+				unsigned int address, const void *buf,
+				size_t size);
+int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
+					      u32 *status);
+int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+			       unsigned int address, void *buf, size_t size);
+
+int usb4_usb3_port_max_link_rate(struct tb_port *port);
+int usb4_usb3_port_actual_link_rate(struct tb_port *port);
+int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
+				       int *downstream_bw);
+int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
+				      int *downstream_bw);
+int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+				     int *downstream_bw);
+
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
+
+void tb_check_quirks(struct tb_switch *sw);
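 
A hedged sketch of how a quirk bit set by tb_check_quirks() would be
consumed during an NVM upgrade (the consumer function is hypothetical;
tb_lc_force_power() is declared earlier in this header):

static void example_apply_quirks(struct tb_switch *sw)
{
	/* Keep the link controller powered while the NVM is written */
	if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
		tb_lc_force_power(sw);
}
 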
+
+#ifdef CONFIG_ACPI
+void tb_acpi_add_links(struct tb_nhi *nhi);
+#else
+static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void tb_debugfs_init(void);
+void tb_debugfs_exit(void);
+void tb_switch_debugfs_init(struct tb_switch *sw);
+void tb_switch_debugfs_remove(struct tb_switch *sw);
+#else
+static inline void tb_debugfs_init(void) { }
+static inline void tb_debugfs_exit(void) { }
+static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
+static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+#endif
+
+#ifdef CONFIG_USB4_KUNIT_TEST
+int tb_test_init(void);
+void tb_test_exit(void);
+#else
+static inline int tb_test_init(void) { return 0; }
+static inline void tb_test_exit(void) { }
+#endif
+
 #endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 4b641e4..0e01dbc 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -28,6 +28,7 @@
 	TB_CFG_ERROR_LOOP = 8,
 	TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
 	TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+	TB_CFG_ERROR_LOCK = 15,
 };
 
 /* common header */
@@ -67,9 +68,13 @@
 	u32 zero1:4;
 	u32 port:6;
 	u32 zero2:2; /* Both should be zero, still they are different fields. */
-	u32 zero3:16;
+	u32 zero3:14;
+	u32 pg:2;
 } __packed;
 
+#define TB_CFG_ERROR_PG_HOT_PLUG	0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG	0x3
+
 /* TB_CFG_PKG_EVENT */
 struct cfg_event_pkg {
 	struct tb_cfg_header header;
@@ -122,6 +127,8 @@
 #define ICM_FLAGS_NO_KEY		BIT(1)
 #define ICM_FLAGS_SLEVEL_SHIFT		3
 #define ICM_FLAGS_SLEVEL_MASK		GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE		BIT(5)
+#define ICM_FLAGS_SPEED_GEN3		BIT(7)
 #define ICM_FLAGS_WRITE			BIT(7)
 
 struct icm_pkg_driver_ready {
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index deb9d4a..e7d9529 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -26,6 +26,7 @@
 #define TB_MAX_CONFIG_RW_LENGTH 60
 
 enum tb_switch_cap {
+	TB_SWITCH_CAP_TMU		= 0x03,
 	TB_SWITCH_CAP_VSE		= 0x05,
 };
 
@@ -38,9 +39,11 @@
 
 enum tb_port_cap {
 	TB_PORT_CAP_PHY			= 0x01,
+	TB_PORT_CAP_POWER		= 0x02,
 	TB_PORT_CAP_TIME1		= 0x03,
 	TB_PORT_CAP_ADAP		= 0x04,
 	TB_PORT_CAP_VSE			= 0x05,
+	TB_PORT_CAP_USB4		= 0x06,
 };
 
 enum tb_port_state {
@@ -91,6 +94,20 @@
 	u16 length;
 } __packed;
 
+/**
+ * struct tb_cap_any - Structure capable of holding any capability
+ * @basic: Basic capability
+ * @extended_short: Vendor specific capability
+ * @extended_long: Vendor specific extended capability
+ */
+struct tb_cap_any {
+	union {
+		struct tb_cap_basic basic;
+		struct tb_cap_extended_short extended_short;
+		struct tb_cap_extended_long extended_long;
+	};
+} __packed;
+
 /* capabilities */
 
 struct tb_cap_link_controller {
@@ -164,10 +181,56 @@
 				  * milliseconds. Writing 0x00 is interpreted
 				  * as 255ms.
 				  */
-	u32 __unknown4:16;
+	u32 cmuv:8;
+	u32 __unknown4:8;
 	u32 thunderbolt_version:8;
 } __packed;
 
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0			0x20
+
+#define ROUTER_CS_1				0x01
+#define ROUTER_CS_4				0x04
+#define ROUTER_CS_5				0x05
+#define ROUTER_CS_5_SLP				BIT(0)
+#define ROUTER_CS_5_WOP				BIT(1)
+#define ROUTER_CS_5_WOU				BIT(2)
+#define ROUTER_CS_5_C3S				BIT(23)
+#define ROUTER_CS_5_PTO				BIT(24)
+#define ROUTER_CS_5_UTO				BIT(25)
+#define ROUTER_CS_5_HCO				BIT(26)
+#define ROUTER_CS_5_CV				BIT(31)
+#define ROUTER_CS_6				0x06
+#define ROUTER_CS_6_SLPR			BIT(0)
+#define ROUTER_CS_6_TNS				BIT(1)
+#define ROUTER_CS_6_WOPS			BIT(2)
+#define ROUTER_CS_6_WOUS			BIT(3)
+#define ROUTER_CS_6_HCI				BIT(18)
+#define ROUTER_CS_6_CR				BIT(25)
+#define ROUTER_CS_7				0x07
+#define ROUTER_CS_9				0x09
+#define ROUTER_CS_25				0x19
+#define ROUTER_CS_26				0x1a
+#define ROUTER_CS_26_STATUS_MASK		GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT		24
+#define ROUTER_CS_26_ONS			BIT(30)
+#define ROUTER_CS_26_OV				BIT(31)
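 
A hedged sketch of driving these bits with the config space accessors
from tb.h, modelled on what usb4_switch_set_sleep() does (TB_CFG_SWITCH
as the router config space selector is an assumption here; error
handling is abbreviated):

static int example_router_set_sleep(struct tb_switch *sw)
{
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Setting SLP tells the router to enter sleep */
	val |= ROUTER_CS_5_SLP;
	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
 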
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0				0x00
+#define TMU_RTR_CS_0_TD				BIT(27)
+#define TMU_RTR_CS_0_UCAP			BIT(30)
+#define TMU_RTR_CS_1				0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK		GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT	16
+#define TMU_RTR_CS_2				0x02
+#define TMU_RTR_CS_3				0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK		GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK	GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT	16
+#define TMU_RTR_CS_22				0x16
+#define TMU_RTR_CS_24				0x18
+
 enum tb_port_type {
 	TB_TYPE_INACTIVE	= 0x000000,
 	TB_TYPE_PORT		= 0x000001,
@@ -178,7 +241,8 @@
 	TB_TYPE_DP_HDMI_OUT	= 0x0e0102,
 	TB_TYPE_PCIE_DOWN	= 0x100101,
 	TB_TYPE_PCIE_UP		= 0x100102,
-	/* TB_TYPE_USB		= 0x200000, lower order bits are not known */
+	TB_TYPE_USB3_DOWN	= 0x200101,
+	TB_TYPE_USB3_UP		= 0x200102,
 };
 
 /* Present on every port in TB_CF_PORT at address zero. */
@@ -189,7 +253,8 @@
 	/* DWORD 1 */
 	u32 first_cap_offset:8;
 	u32 max_counters:11;
-	u32 __unknown1:5;
+	u32 counters_support:1;
+	u32 __unknown1:4;
 	u32 revision:8;
 	/* DWORD 2 */
 	enum tb_port_type type:24;
@@ -211,37 +276,123 @@
 
 } __packed;
 
-/* DWORD 4 */
-#define TB_PORT_NFC_CREDITS_MASK	GENMASK(19, 0)
-#define TB_PORT_MAX_CREDITS_SHIFT	20
-#define TB_PORT_MAX_CREDITS_MASK	GENMASK(26, 20)
-/* DWORD 5 */
-#define TB_PORT_LCA_SHIFT		22
-#define TB_PORT_LCA_MASK		GENMASK(28, 22)
+/* Basic adapter configuration registers */
+#define ADP_CS_4				0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK		GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK		GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT		20
+#define ADP_CS_4_LCK				BIT(31)
+#define ADP_CS_5				0x05
+#define ADP_CS_5_LCA_MASK			GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT			22
+
+/* TMU adapter registers */
+#define TMU_ADP_CS_3				0x03
+#define TMU_ADP_CS_3_UDM			BIT(29)
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0				0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK	GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT	20
+#define LANE_ADP_CS_1				0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK		GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT	4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE	0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL		0x3
+#define LANE_ADP_CS_1_LD			BIT(14)
+#define LANE_ADP_CS_1_LB			BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK	GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT	16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2	0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3	0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK	GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT	20
+
+/* USB4 port registers */
+#define PORT_CS_1				0x01
+#define PORT_CS_1_LENGTH_SHIFT			8
+#define PORT_CS_1_TARGET_MASK			GENMASK(18, 16)
+#define PORT_CS_1_TARGET_SHIFT			16
+#define PORT_CS_1_RETIMER_INDEX_SHIFT		20
+#define PORT_CS_1_WNR_WRITE			BIT(24)
+#define PORT_CS_1_NR				BIT(25)
+#define PORT_CS_1_RC				BIT(26)
+#define PORT_CS_1_PND				BIT(31)
+#define PORT_CS_2				0x02
+#define PORT_CS_18				0x12
+#define PORT_CS_18_BE				BIT(8)
+#define PORT_CS_18_TCM				BIT(9)
+#define PORT_CS_18_WOU4S			BIT(18)
+#define PORT_CS_19				0x13
+#define PORT_CS_19_PC				BIT(3)
+#define PORT_CS_19_PID				BIT(4)
+#define PORT_CS_19_WOC				BIT(16)
+#define PORT_CS_19_WOD				BIT(17)
+#define PORT_CS_19_WOU4				BIT(18)
 
 /* Display Port adapter registers */
-
-/* DWORD 0 */
-#define TB_DP_VIDEO_HOPID_SHIFT		16
-#define TB_DP_VIDEO_HOPID_MASK		GENMASK(26, 16)
-#define TB_DP_AUX_EN			BIT(30)
-#define TB_DP_VIDEO_EN			BIT(31)
-/* DWORD 1 */
-#define TB_DP_AUX_TX_HOPID_MASK		GENMASK(10, 0)
-#define TB_DP_AUX_RX_HOPID_SHIFT	11
-#define TB_DP_AUX_RX_HOPID_MASK		GENMASK(21, 11)
-/* DWORD 2 */
-#define TB_DP_HDP			BIT(6)
-/* DWORD 3 */
-#define TB_DP_HPDC			BIT(9)
-/* DWORD 4 */
-#define TB_DP_LOCAL_CAP			0x4
-/* DWORD 5 */
-#define TB_DP_REMOTE_CAP		0x5
+#define ADP_DP_CS_0				0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK		GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT		16
+#define ADP_DP_CS_0_AE				BIT(30)
+#define ADP_DP_CS_0_VE				BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK		GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK		GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT		11
+#define ADP_DP_CS_2				0x02
+#define ADP_DP_CS_2_HDP				BIT(6)
+#define ADP_DP_CS_3				0x03
+#define ADP_DP_CS_3_HDPC			BIT(9)
+#define DP_LOCAL_CAP				0x04
+#define DP_REMOTE_CAP				0x05
+#define DP_STATUS_CTRL				0x06
+#define DP_STATUS_CTRL_CMHS			BIT(25)
+#define DP_STATUS_CTRL_UF			BIT(26)
+#define DP_COMMON_CAP				0x07
+/*
+ * The DP_COMMON_CAP offsets also apply to DP_LOCAL_CAP and
+ * DP_REMOTE_CAP, with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK			GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT		8
+#define DP_COMMON_CAP_RATE_RBR			0x0
+#define DP_COMMON_CAP_RATE_HBR			0x1
+#define DP_COMMON_CAP_RATE_HBR2			0x2
+#define DP_COMMON_CAP_RATE_HBR3			0x3
+#define DP_COMMON_CAP_LANES_MASK		GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT		12
+#define DP_COMMON_CAP_1_LANE			0x0
+#define DP_COMMON_CAP_2_LANES			0x1
+#define DP_COMMON_CAP_4_LANES			0x2
+#define DP_COMMON_CAP_DPRX_DONE			BIT(31)
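 
A hedged sketch of decoding a capability dword with these masks (the
function is illustrative; the dword would come from the DP_LOCAL_CAP,
DP_REMOTE_CAP or DP_COMMON_CAP register of a DP adapter):

static void example_decode_dp_cap(u32 cap)
{
	u32 rate = (cap & DP_COMMON_CAP_RATE_MASK) >>
		   DP_COMMON_CAP_RATE_SHIFT;
	u32 lanes = (cap & DP_COMMON_CAP_LANES_MASK) >>
		    DP_COMMON_CAP_LANES_SHIFT;

	/* e.g. DP_COMMON_CAP_RATE_HBR2 (0x2) is 5.4 Gb/s per lane */
	pr_info("DP cap: rate code %u, lane code %u\n", rate, lanes);
}
 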
 
 /* PCIe adapter registers */
+#define ADP_PCIE_CS_0				0x00
+#define ADP_PCIE_CS_0_PE			BIT(31)
 
-#define TB_PCI_EN			BIT(31)
+/* USB3 adapter registers */
+#define ADP_USB3_CS_0				0x00
+#define ADP_USB3_CS_0_V				BIT(30)
+#define ADP_USB3_CS_0_PE			BIT(31)
+#define ADP_USB3_CS_1				0x01
+#define ADP_USB3_CS_1_CUBW_MASK			GENMASK(11, 0)
+#define ADP_USB3_CS_1_CDBW_MASK			GENMASK(23, 12)
+#define ADP_USB3_CS_1_CDBW_SHIFT		12
+#define ADP_USB3_CS_1_HCA			BIT(31)
+#define ADP_USB3_CS_2				0x02
+#define ADP_USB3_CS_2_AUBW_MASK			GENMASK(11, 0)
+#define ADP_USB3_CS_2_ADBW_MASK			GENMASK(23, 12)
+#define ADP_USB3_CS_2_ADBW_SHIFT		12
+#define ADP_USB3_CS_2_CMR			BIT(31)
+#define ADP_USB3_CS_3				0x03
+#define ADP_USB3_CS_3_SCALE_MASK		GENMASK(5, 0)
+#define ADP_USB3_CS_4				0x04
+#define ADP_USB3_CS_4_ALR_MASK			GENMASK(6, 0)
+#define ADP_USB3_CS_4_ALR_20G			0x1
+#define ADP_USB3_CS_4_ULV			BIT(7)
+#define ADP_USB3_CS_4_MSLR_MASK			GENMASK(18, 12)
+#define ADP_USB3_CS_4_MSLR_SHIFT		12
+#define ADP_USB3_CS_4_MSLR_20G			0x1
 
 /* Hop register from TB_CFG_HOPS. 8 byte per entry. */
 struct tb_regs_hop {
@@ -278,11 +429,27 @@
 #define TB_LC_DESC_PORT_SIZE_SHIFT	16
 #define TB_LC_DESC_PORT_SIZE_MASK	GENMASK(27, 16)
 #define TB_LC_FUSE			0x03
+#define TB_LC_SNK_ALLOCATION		0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK	GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM	0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT	4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK	GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM	0x1
+#define TB_LC_POWER			0x740
 
 /* Link controller registers */
+#define TB_LC_PORT_ATTR			0x8d
+#define TB_LC_PORT_ATTR_BE		BIT(12)
+
 #define TB_LC_SX_CTRL			0x96
+#define TB_LC_SX_CTRL_WOC		BIT(1)
+#define TB_LC_SX_CTRL_WOD		BIT(2)
+#define TB_LC_SX_CTRL_WOU4		BIT(5)
+#define TB_LC_SX_CTRL_WOP		BIT(6)
 #define TB_LC_SX_CTRL_L1C		BIT(16)
+#define TB_LC_SX_CTRL_L1D		BIT(17)
 #define TB_LC_SX_CTRL_L2C		BIT(20)
+#define TB_LC_SX_CTRL_L2D		BIT(21)
 #define TB_LC_SX_CTRL_UPSTREAM		BIT(30)
 #define TB_LC_SX_CTRL_SLP		BIT(31)
 
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
new file mode 100644
index 0000000..e254f8c
--- /dev/null
+++ b/drivers/thunderbolt/test.c
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/idr.h>
+
+#include "tb.h"
+#include "tunnel.h"
+
+static int __ida_init(struct kunit_resource *res, void *context)
+{
+	struct ida *ida = context;
+
+	ida_init(ida);
+	res->data = ida;
+	return 0;
+}
+
+static void __ida_destroy(struct kunit_resource *res)
+{
+	struct ida *ida = res->data;
+
+	ida_destroy(ida);
+}
+
+static void kunit_ida_init(struct kunit *test, struct ida *ida)
+{
+	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
+}
+
+static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
+				      u8 upstream_port, u8 max_port_number)
+{
+	struct tb_switch *sw;
+	size_t size;
+	int i;
+
+	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->config.upstream_port_number = upstream_port;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->config.enabled = 0;
+	sw->config.max_port_number = max_port_number;
+
+	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
+	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
+	if (!sw->ports)
+		return NULL;
+
+	for (i = 0; i <= sw->config.max_port_number; i++) {
+		sw->ports[i].sw = sw;
+		sw->ports[i].port = i;
+		sw->ports[i].config.port_number = i;
+		if (i) {
+			kunit_ida_init(test, &sw->ports[i].in_hopids);
+			kunit_ida_init(test, &sw->ports[i].out_hopids);
+		}
+	}
+
+	return sw;
+}
+
+static struct tb_switch *alloc_host(struct kunit *test)
+{
+	struct tb_switch *sw;
+
+	sw = alloc_switch(test, 0, 7, 13);
+	if (!sw)
+		return NULL;
+
+	sw->config.vendor_id = 0x8086;
+	sw->config.device_id = 0x9a1b;
+
+	sw->ports[0].config.type = TB_TYPE_PORT;
+	sw->ports[0].config.max_in_hop_id = 7;
+	sw->ports[0].config.max_out_hop_id = 7;
+
+	sw->ports[1].config.type = TB_TYPE_PORT;
+	sw->ports[1].config.max_in_hop_id = 19;
+	sw->ports[1].config.max_out_hop_id = 19;
+	sw->ports[1].dual_link_port = &sw->ports[2];
+
+	sw->ports[2].config.type = TB_TYPE_PORT;
+	sw->ports[2].config.max_in_hop_id = 19;
+	sw->ports[2].config.max_out_hop_id = 19;
+	sw->ports[2].dual_link_port = &sw->ports[1];
+	sw->ports[2].link_nr = 1;
+
+	sw->ports[3].config.type = TB_TYPE_PORT;
+	sw->ports[3].config.max_in_hop_id = 19;
+	sw->ports[3].config.max_out_hop_id = 19;
+	sw->ports[3].dual_link_port = &sw->ports[4];
+
+	sw->ports[4].config.type = TB_TYPE_PORT;
+	sw->ports[4].config.max_in_hop_id = 19;
+	sw->ports[4].config.max_out_hop_id = 19;
+	sw->ports[4].dual_link_port = &sw->ports[3];
+	sw->ports[4].link_nr = 1;
+
+	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
+	sw->ports[5].config.max_in_hop_id = 9;
+	sw->ports[5].config.max_out_hop_id = 9;
+	sw->ports[5].cap_adap = -1;
+
+	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
+	sw->ports[6].config.max_in_hop_id = 9;
+	sw->ports[6].config.max_out_hop_id = 9;
+	sw->ports[6].cap_adap = -1;
+
+	sw->ports[7].config.type = TB_TYPE_NHI;
+	sw->ports[7].config.max_in_hop_id = 11;
+	sw->ports[7].config.max_out_hop_id = 11;
+
+	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
+	sw->ports[8].config.max_in_hop_id = 8;
+	sw->ports[8].config.max_out_hop_id = 8;
+
+	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
+	sw->ports[9].config.max_in_hop_id = 8;
+	sw->ports[9].config.max_out_hop_id = 8;
+
+	sw->ports[10].disabled = true;
+	sw->ports[11].disabled = true;
+
+	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
+	sw->ports[12].config.max_in_hop_id = 8;
+	sw->ports[12].config.max_out_hop_id = 8;
+
+	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
+	sw->ports[13].config.max_in_hop_id = 8;
+	sw->ports[13].config.max_out_hop_id = 8;
+
+	return sw;
+}
+
+static struct tb_switch *alloc_dev_default(struct kunit *test,
+					   struct tb_switch *parent,
+					   u64 route, bool bonded)
+{
+	struct tb_port *port, *upstream_port;
+	struct tb_switch *sw;
+
+	sw = alloc_switch(test, route, 1, 19);
+	if (!sw)
+		return NULL;
+
+	sw->config.vendor_id = 0x8086;
+	sw->config.device_id = 0x15ef;
+
+	sw->ports[0].config.type = TB_TYPE_PORT;
+	sw->ports[0].config.max_in_hop_id = 8;
+	sw->ports[0].config.max_out_hop_id = 8;
+
+	sw->ports[1].config.type = TB_TYPE_PORT;
+	sw->ports[1].config.max_in_hop_id = 19;
+	sw->ports[1].config.max_out_hop_id = 19;
+	sw->ports[1].dual_link_port = &sw->ports[2];
+
+	sw->ports[2].config.type = TB_TYPE_PORT;
+	sw->ports[2].config.max_in_hop_id = 19;
+	sw->ports[2].config.max_out_hop_id = 19;
+	sw->ports[2].dual_link_port = &sw->ports[1];
+	sw->ports[2].link_nr = 1;
+
+	sw->ports[3].config.type = TB_TYPE_PORT;
+	sw->ports[3].config.max_in_hop_id = 19;
+	sw->ports[3].config.max_out_hop_id = 19;
+	sw->ports[3].dual_link_port = &sw->ports[4];
+
+	sw->ports[4].config.type = TB_TYPE_PORT;
+	sw->ports[4].config.max_in_hop_id = 19;
+	sw->ports[4].config.max_out_hop_id = 19;
+	sw->ports[4].dual_link_port = &sw->ports[3];
+	sw->ports[4].link_nr = 1;
+
+	sw->ports[5].config.type = TB_TYPE_PORT;
+	sw->ports[5].config.max_in_hop_id = 19;
+	sw->ports[5].config.max_out_hop_id = 19;
+	sw->ports[5].dual_link_port = &sw->ports[6];
+
+	sw->ports[6].config.type = TB_TYPE_PORT;
+	sw->ports[6].config.max_in_hop_id = 19;
+	sw->ports[6].config.max_out_hop_id = 19;
+	sw->ports[6].dual_link_port = &sw->ports[5];
+	sw->ports[6].link_nr = 1;
+
+	sw->ports[7].config.type = TB_TYPE_PORT;
+	sw->ports[7].config.max_in_hop_id = 19;
+	sw->ports[7].config.max_out_hop_id = 19;
+	sw->ports[7].dual_link_port = &sw->ports[8];
+
+	sw->ports[8].config.type = TB_TYPE_PORT;
+	sw->ports[8].config.max_in_hop_id = 19;
+	sw->ports[8].config.max_out_hop_id = 19;
+	sw->ports[8].dual_link_port = &sw->ports[7];
+	sw->ports[8].link_nr = 1;
+
+	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
+	sw->ports[9].config.max_in_hop_id = 8;
+	sw->ports[9].config.max_out_hop_id = 8;
+
+	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
+	sw->ports[10].config.max_in_hop_id = 8;
+	sw->ports[10].config.max_out_hop_id = 8;
+
+	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
+	sw->ports[11].config.max_in_hop_id = 8;
+	sw->ports[11].config.max_out_hop_id = 8;
+
+	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
+	sw->ports[12].config.max_in_hop_id = 8;
+	sw->ports[12].config.max_out_hop_id = 8;
+
+	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
+	sw->ports[13].config.max_in_hop_id = 9;
+	sw->ports[13].config.max_out_hop_id = 9;
+	sw->ports[13].cap_adap = -1;
+
+	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
+	sw->ports[14].config.max_in_hop_id = 9;
+	sw->ports[14].config.max_out_hop_id = 9;
+	sw->ports[14].cap_adap = -1;
+
+	sw->ports[15].disabled = true;
+
+	sw->ports[16].config.type = TB_TYPE_USB3_UP;
+	sw->ports[16].config.max_in_hop_id = 8;
+	sw->ports[16].config.max_out_hop_id = 8;
+
+	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
+	sw->ports[17].config.max_in_hop_id = 8;
+	sw->ports[17].config.max_out_hop_id = 8;
+
+	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
+	sw->ports[18].config.max_in_hop_id = 8;
+	sw->ports[18].config.max_out_hop_id = 8;
+
+	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
+	sw->ports[19].config.max_in_hop_id = 8;
+	sw->ports[19].config.max_out_hop_id = 8;
+
+	if (!parent)
+		return sw;
+
+	/* Link them */
+	upstream_port = tb_upstream_port(sw);
+	port = tb_port_at(route, parent);
+	port->remote = upstream_port;
+	upstream_port->remote = port;
+	if (port->dual_link_port && upstream_port->dual_link_port) {
+		port->dual_link_port->remote = upstream_port->dual_link_port;
+		upstream_port->dual_link_port->remote = port->dual_link_port;
+
+		if (bonded) {
+			/* Bonding is used */
+			port->bonded = true;
+			port->dual_link_port->bonded = true;
+			upstream_port->bonded = true;
+			upstream_port->dual_link_port->bonded = true;
+		}
+	}
+
+	return sw;
+}
+
+static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
+					     struct tb_switch *parent,
+					     u64 route, bool bonded)
+{
+	struct tb_switch *sw;
+
+	sw = alloc_dev_default(test, parent, route, bonded);
+	if (!sw)
+		return NULL;
+
+	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
+	sw->ports[13].config.max_in_hop_id = 9;
+	sw->ports[13].config.max_out_hop_id = 9;
+
+	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
+	sw->ports[14].config.max_in_hop_id = 9;
+	sw->ports[14].config.max_out_hop_id = 9;
+
+	return sw;
+}
+
+static void tb_test_path_basic(struct kunit *test)
+{
+	struct tb_port *src_port, *dst_port, *p;
+	struct tb_switch *host;
+
+	host = alloc_host(test);
+
+	src_port = &host->ports[5];
+	dst_port = src_port;
+
+	p = tb_next_port_on_path(src_port, dst_port, NULL);
+	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
+
+	p = tb_next_port_on_path(src_port, dst_port, p);
+	KUNIT_EXPECT_TRUE(test, !p);
+}
+
+static void tb_test_path_not_connected_walk(struct kunit *test)
+{
+	struct tb_port *src_port, *dst_port, *p;
+	struct tb_switch *host, *dev;
+
+	host = alloc_host(test);
+	/* No connection between host and dev */
+	dev = alloc_dev_default(test, NULL, 3, true);
+
+	src_port = &host->ports[12];
+	dst_port = &dev->ports[16];
+
+	p = tb_next_port_on_path(src_port, dst_port, NULL);
+	KUNIT_EXPECT_PTR_EQ(test, p, src_port);
+
+	p = tb_next_port_on_path(src_port, dst_port, p);
+	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
+
+	p = tb_next_port_on_path(src_port, dst_port, p);
+	KUNIT_EXPECT_TRUE(test, !p);
+
+	/* Other direction */
+
+	p = tb_next_port_on_path(dst_port, src_port, NULL);
+	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
+
+	p = tb_next_port_on_path(dst_port, src_port, p);
+	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
+
+	p = tb_next_port_on_path(dst_port, src_port, p);
+	KUNIT_EXPECT_TRUE(test, !p);
+}
+
+struct port_expectation {
+	u64 route;
+	u8 port;
+	enum tb_port_type type;
+};
+
+static void tb_test_path_single_hop_walk(struct kunit *test)
+{
+	/*
+	 * Walks from Host PCIe downstream port to Device #1 PCIe
+	 * upstream port.
+	 *
+	 *   [Host]
+	 *   1 |
+	 *   1 |
+	 *  [Device]
+	 */
+	static const struct port_expectation test_data[] = {
+		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
+		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
+	};
+	struct tb_port *src_port, *dst_port, *p;
+	struct tb_switch *host, *dev;
+	int i;
+
+	host = alloc_host(test);
+	dev = alloc_dev_default(test, host, 1, true);
+
+	src_port = &host->ports[8];
+	dst_port = &dev->ports[9];
+
+	/* Walk both directions */
+
+	i = 0;
+	tb_for_each_port_on_path(src_port, dst_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i++;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+	i = ARRAY_SIZE(test_data) - 1;
+	tb_for_each_port_on_path(dst_port, src_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i--;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_daisy_chain_walk(struct kunit *test)
+{
+	/*
+	 * Walks from Host DP IN to Device #2 DP OUT.
+	 *
+	 *           [Host]
+	 *            1 |
+	 *            1 |
+	 *         [Device #1]
+	 *       3 /
+	 *      1 /
+	 * [Device #2]
+	 */
+	static const struct port_expectation test_data[] = {
+		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
+		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+	};
+	struct tb_port *src_port, *dst_port, *p;
+	struct tb_switch *host, *dev1, *dev2;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x301, true);
+
+	src_port = &host->ports[5];
+	dst_port = &dev2->ports[13];
+
+	/* Walk both directions */
+
+	i = 0;
+	tb_for_each_port_on_path(src_port, dst_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i++;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+	i = ARRAY_SIZE(test_data) - 1;
+	tb_for_each_port_on_path(dst_port, src_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i--;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_simple_tree_walk(struct kunit *test)
+{
+	/*
+	 * Walks from Host DP IN to Device #3 DP OUT.
+	 *
+	 *           [Host]
+	 *            1 |
+	 *            1 |
+	 *         [Device #1]
+	 *       3 /   | 5  \ 7
+	 *      1 /    |     \ 1
+	 * [Device #2] |    [Device #4]
+	 *             | 1
+	 *         [Device #3]
+	 */
+	static const struct port_expectation test_data[] = {
+		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
+		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
+		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+	};
+	struct tb_port *src_port, *dst_port, *p;
+	struct tb_switch *host, *dev1, *dev3;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	alloc_dev_default(test, dev1, 0x301, true);
+	dev3 = alloc_dev_default(test, dev1, 0x501, true);
+	alloc_dev_default(test, dev1, 0x701, true);
+
+	src_port = &host->ports[5];
+	dst_port = &dev3->ports[13];
+
+	/* Walk both directions */
+
+	i = 0;
+	tb_for_each_port_on_path(src_port, dst_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i++;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+	i = ARRAY_SIZE(test_data) - 1;
+	tb_for_each_port_on_path(dst_port, src_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i--;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_complex_tree_walk(struct kunit *test)
+{
+	/*
+	 * Walks from Device #3 DP IN to Device #9 DP OUT.
+	 *
+	 *           [Host]
+	 *            1 |
+	 *            1 |
+	 *         [Device #1]
+	 *       3 /   | 5  \ 7
+	 *      1 /    |     \ 1
+	 * [Device #2] |    [Device #5]
+	 *    5 |      | 1         \ 7
+	 *    1 |  [Device #4]      \ 1
+	 * [Device #3]             [Device #6]
+	 *                       3 /
+	 *                      1 /
+	 *                    [Device #7]
+	 *                  3 /      | 5
+	 *                 1 /       |
+	 *               [Device #8] | 1
+	 *                       [Device #9]
+	 */
+	static const struct port_expectation test_data[] = {
+		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
+		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
+		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
+		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
+		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
+	};
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
+	struct tb_port *src_port, *dst_port, *p;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x301, true);
+	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
+	alloc_dev_default(test, dev1, 0x501, true);
+	dev5 = alloc_dev_default(test, dev1, 0x701, true);
+	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
+	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
+	alloc_dev_default(test, dev7, 0x303070701, true);
+	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
+
+	src_port = &dev3->ports[13];
+	dst_port = &dev9->ports[14];
+
+	/* Walk both directions */
+
+	i = 0;
+	tb_for_each_port_on_path(src_port, dst_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i++;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+	i = ARRAY_SIZE(test_data) - 1;
+	tb_for_each_port_on_path(dst_port, src_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i--;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_max_length_walk(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
+	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
+	struct tb_port *src_port, *dst_port, *p;
+	int i;
+
+	/*
+	 * Walks from Device #6 DP IN to Device #12 DP OUT.
+	 *
+	 *          [Host]
+	 *         1 /  \ 3
+	 *        1 /    \ 1
+	 * [Device #1]   [Device #7]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #2]   [Device #8]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #3]   [Device #9]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #4]   [Device #10]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #5]   [Device #11]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #6]   [Device #12]
+	 */
+	static const struct port_expectation test_data[] = {
+		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
+		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
+		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
+		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+	};
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x301, true);
+	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
+	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
+	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
+	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
+	dev7 = alloc_dev_default(test, host, 0x3, true);
+	dev8 = alloc_dev_default(test, dev7, 0x303, true);
+	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
+	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
+	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
+	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
+
+	src_port = &dev6->ports[13];
+	dst_port = &dev12->ports[13];
+
+	/* Walk both directions */
+
+	i = 0;
+	tb_for_each_port_on_path(src_port, dst_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i++;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+	i = ARRAY_SIZE(test_data) - 1;
+	tb_for_each_port_on_path(dst_port, src_port, p) {
+		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+				test_data[i].type);
+		i--;
+	}
+
+	KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_not_connected(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2;
+	struct tb_port *down, *up;
+	struct tb_path *path;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x3, false);
+	/* Not connected to anything */
+	dev2 = alloc_dev_default(test, NULL, 0x303, false);
+
+	down = &dev1->ports[10];
+	up = &dev2->ports[9];
+
+	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
+	KUNIT_ASSERT_TRUE(test, path == NULL);
+	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
+	KUNIT_ASSERT_TRUE(test, path == NULL);
+}
+
+struct hop_expectation {
+	u64 route;
+	u8 in_port;
+	enum tb_port_type in_type;
+	u8 out_port;
+	enum tb_port_type out_type;
+};
+
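+/*
+ * Unlike port_expectation above, which describes one traversed port,
+ * each hop_expectation covers a full hop: the in and out adapters of
+ * a single switch on the path.
+ */
+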
+static void tb_test_path_not_bonded_lane0(struct kunit *test)
+{
+	/*
+	 * PCIe path from host to device using lane 0.
+	 *
+	 *   [Host]
+	 *   3 |: 4
+	 *   1 |: 2
+	 *  [Device]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x0,
+			.in_port = 9,
+			.in_type = TB_TYPE_PCIE_DOWN,
+			.out_port = 3,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x3,
+			.in_port = 1,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 9,
+			.out_type = TB_TYPE_PCIE_UP,
+		},
+	};
+	struct tb_switch *host, *dev;
+	struct tb_port *down, *up;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev = alloc_dev_default(test, host, 0x3, false);
+
+	down = &host->ports[9];
+	up = &dev->ports[9];
+
+	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1(struct kunit *test)
+{
+	/*
+	 * DP Video path from host to device using lane 1. Paths like
+	 * these are only used with Thunderbolt 1 devices where lane
+	 * bonding is not possible. USB4 specifically does not allow
+	 * paths like this (either lane 0 is used with lane 1 disabled,
+	 * or both lanes are bonded).
+	 *
+	 *   [Host]
+	 *   1 :| 2
+	 *   1 :| 2
+	 *  [Device]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x0,
+			.in_port = 5,
+			.in_type = TB_TYPE_DP_HDMI_IN,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x1,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 13,
+			.out_type = TB_TYPE_DP_HDMI_OUT,
+		},
+	};
+	struct tb_switch *host, *dev;
+	struct tb_port *in, *out;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev = alloc_dev_default(test, host, 0x1, false);
+
+	in = &host->ports[5];
+	out = &dev->ports[13];
+
+	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
+{
+	/*
+	 * DP Video path from host to device 3 using lane 1.
+	 *
+	 *    [Host]
+	 *    1 :| 2
+	 *    1 :| 2
+	 *  [Device #1]
+	 *    7 :| 8
+	 *    1 :| 2
+	 *  [Device #2]
+	 *    5 :| 6
+	 *    1 :| 2
+	 *  [Device #3]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x0,
+			.in_port = 5,
+			.in_type = TB_TYPE_DP_HDMI_IN,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x1,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 8,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x701,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 6,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x50701,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 13,
+			.out_type = TB_TYPE_DP_HDMI_OUT,
+		},
+	};
+	struct tb_switch *host, *dev1, *dev2, *dev3;
+	struct tb_port *in, *out;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, false);
+	dev2 = alloc_dev_default(test, dev1, 0x701, false);
+	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+
+	in = &host->ports[5];
+	out = &dev3->ports[13];
+
+	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
+{
+	/*
+	 * DP Video path from device 3 to host using lane 1.
+	 *
+	 *    [Host]
+	 *    1 :| 2
+	 *    1 :| 2
+	 *  [Device #1]
+	 *    7 :| 8
+	 *    1 :| 2
+	 *  [Device #2]
+	 *    5 :| 6
+	 *    1 :| 2
+	 *  [Device #3]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x50701,
+			.in_port = 13,
+			.in_type = TB_TYPE_DP_HDMI_IN,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x701,
+			.in_port = 6,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x1,
+			.in_port = 8,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x0,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 5,
+			.out_type = TB_TYPE_DP_HDMI_IN,
+		},
+	};
+	struct tb_switch *host, *dev1, *dev2, *dev3;
+	struct tb_port *in, *out;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, false);
+	dev2 = alloc_dev_default(test, dev1, 0x701, false);
+	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
+
+	in = &dev3->ports[13];
+	out = &host->ports[5];
+
+	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_path_mixed_chain(struct kunit *test)
+{
+	/*
+	 * DP Video path from host to device 4 where the first and last
+	 * links are bonded.
+	 *
+	 *    [Host]
+	 *    1 |
+	 *    1 |
+	 *  [Device #1]
+	 *    7 :| 8
+	 *    1 :| 2
+	 *  [Device #2]
+	 *    5 :| 6
+	 *    1 :| 2
+	 *  [Device #3]
+	 *    3 |
+	 *    1 |
+	 *  [Device #4]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x0,
+			.in_port = 5,
+			.in_type = TB_TYPE_DP_HDMI_IN,
+			.out_port = 1,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x1,
+			.in_port = 1,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 8,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x701,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 6,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x50701,
+			.in_port = 2,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 3,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x3050701,
+			.in_port = 1,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 13,
+			.out_type = TB_TYPE_DP_HDMI_OUT,
+		},
+	};
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
+	struct tb_port *in, *out;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x701, false);
+	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
+
+	in = &host->ports[5];
+	out = &dev4->ports[13];
+
+	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_path_mixed_chain_reverse(struct kunit *test)
+{
+	/*
+	 * DP Video path from device 4 to host where the first and last
+	 * links are bonded.
+	 *
+	 *    [Host]
+	 *    1 |
+	 *    1 |
+	 *  [Device #1]
+	 *    7 :| 8
+	 *    1 :| 2
+	 *  [Device #2]
+	 *    5 :| 6
+	 *    1 :| 2
+	 *  [Device #3]
+	 *    3 |
+	 *    1 |
+	 *  [Device #4]
+	 */
+	static const struct hop_expectation test_data[] = {
+		{
+			.route = 0x3050701,
+			.in_port = 13,
+			.in_type = TB_TYPE_DP_HDMI_OUT,
+			.out_port = 1,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x50701,
+			.in_port = 3,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x701,
+			.in_port = 6,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 2,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x1,
+			.in_port = 8,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 1,
+			.out_type = TB_TYPE_PORT,
+		},
+		{
+			.route = 0x0,
+			.in_port = 1,
+			.in_type = TB_TYPE_PORT,
+			.out_port = 5,
+			.out_type = TB_TYPE_DP_HDMI_IN,
+		},
+	};
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
+	struct tb_port *in, *out;
+	struct tb_path *path;
+	int i;
+
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x701, false);
+	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
+
+	in = &dev4->ports[13];
+	out = &host->ports[5];
+
+	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+	KUNIT_ASSERT_TRUE(test, path != NULL);
+	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+		const struct tb_port *in_port, *out_port;
+
+		in_port = path->hops[i].in_port;
+		out_port = path->hops[i].out_port;
+
+		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+				test_data[i].in_type);
+		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+				test_data[i].out_type);
+	}
+	tb_path_free(path);
+}
+
+static void tb_test_tunnel_pcie(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2;
+	struct tb_tunnel *tunnel1, *tunnel2;
+	struct tb_port *down, *up;
+
+	/*
+	 * Create PCIe tunnels between the host and two devices.
+	 *
+	 *   [Host]
+	 *    1 |
+	 *    1 |
+	 *  [Device #1]
+	 *    5 |
+	 *    1 |
+	 *  [Device #2]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x501, true);
+
+	down = &host->ports[8];
+	up = &dev1->ports[9];
+	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
+	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
+
+	down = &dev1->ports[10];
+	up = &dev2->ports[9];
+	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
+	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
+
+	tb_tunnel_free(tunnel2);
+	tb_tunnel_free(tunnel1);
+}
+
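+/*
+ * The DP tunnel tests below expect three paths per tunnel. Given the
+ * HopIDs the driver reserves for DP adapters (8 for AUX, 9 for Video),
+ * a reasonable reading of the indices is:
+ *
+ *	paths[0]: Video, DP IN -> DP OUT
+ *	paths[1]: AUX TX, DP IN -> DP OUT
+ *	paths[2]: AUX RX, DP OUT -> DP IN
+ *
+ * which matches the in_port/out_port expectations asserted below.
+ */
+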
+static void tb_test_tunnel_dp(struct kunit *test)
+{
+	struct tb_switch *host, *dev;
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Create DP tunnel between Host and Device
+	 *
+	 *   [Host]
+	 *   1 |
+	 *   1 |
+	 *  [Device]
+	 */
+	host = alloc_host(test);
+	dev = alloc_dev_default(test, host, 0x3, true);
+
+	in = &host->ports[5];
+	out = &dev->ports[13];
+
+	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_chain(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev4;
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
+	 *
+	 *           [Host]
+	 *            1 |
+	 *            1 |
+	 *         [Device #1]
+	 *       3 /   | 5  \ 7
+	 *      1 /    |     \ 1
+	 * [Device #2] |    [Device #4]
+	 *             | 1
+	 *         [Device #3]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	alloc_dev_default(test, dev1, 0x301, true);
+	alloc_dev_default(test, dev1, 0x501, true);
+	dev4 = alloc_dev_default(test, dev1, 0x701, true);
+
+	in = &host->ports[5];
+	out = &dev4->ports[14];
+
+	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_tree(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
+	 *
+	 *          [Host]
+	 *           3 |
+	 *           1 |
+	 *         [Device #1]
+	 *       3 /   | 5  \ 7
+	 *      1 /    |     \ 1
+	 * [Device #2] |    [Device #4]
+	 *             | 1
+	 *         [Device #3]
+	 *             | 5
+	 *             | 1
+	 *         [Device #5]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x3, true);
+	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
+	dev3 = alloc_dev_default(test, dev1, 0x503, true);
+	alloc_dev_default(test, dev1, 0x703, true);
+	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
+
+	in = &dev2->ports[13];
+	out = &dev5->ports[13];
+
+	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_max_length(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
+	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
+	struct tb_port *in, *out;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Creates DP tunnel from Device #6 to Device #12.
+	 *
+	 *          [Host]
+	 *         1 /  \ 3
+	 *        1 /    \ 1
+	 * [Device #1]   [Device #7]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #2]   [Device #8]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #3]   [Device #9]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #4]   [Device #10]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #5]   [Device #11]
+	 *     3 |           | 3
+	 *     1 |           | 1
+	 * [Device #6]   [Device #12]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x301, true);
+	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
+	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
+	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
+	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
+	dev7 = alloc_dev_default(test, host, 0x3, true);
+	dev8 = alloc_dev_default(test, dev7, 0x303, true);
+	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
+	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
+	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
+	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
+
+	in = &dev6->ports[13];
+	out = &dev12->ports[13];
+
+	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
+	/* First hop */
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+	/* Middle */
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
+			    &host->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
+			    &host->ports[3]);
+	/* Last */
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
+			    &host->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
+			    &host->ports[3]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
+	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
+			    &host->ports[3]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
+			    &host->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_usb3(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2;
+	struct tb_tunnel *tunnel1, *tunnel2;
+	struct tb_port *down, *up;
+
+	/*
+	 * Create USB3 tunnels between the host and two devices.
+	 *
+	 *   [Host]
+	 *    1 |
+	 *    1 |
+	 *  [Device #1]
+	 *          \ 7
+	 *           \ 1
+	 *         [Device #2]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x701, true);
+
+	down = &host->ports[12];
+	up = &dev1->ports[16];
+	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
+
+	down = &dev1->ports[17];
+	up = &dev2->ports[16];
+	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
+	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
+
+	tb_tunnel_free(tunnel2);
+	tb_tunnel_free(tunnel1);
+}
+
+static void tb_test_tunnel_port_on_path(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
+	struct tb_port *in, *out, *port;
+	struct tb_tunnel *dp_tunnel;
+
+	/*
+	 *          [Host]
+	 *           3 |
+	 *           1 |
+	 *         [Device #1]
+	 *       3 /   | 5  \ 7
+	 *      1 /    |     \ 1
+	 * [Device #2] |    [Device #4]
+	 *             | 1
+	 *         [Device #3]
+	 *             | 5
+	 *             | 1
+	 *         [Device #5]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x3, true);
+	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
+	dev3 = alloc_dev_default(test, dev1, 0x503, true);
+	dev4 = alloc_dev_default(test, dev1, 0x703, true);
+	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
+
+	in = &dev2->ports[13];
+	out = &dev5->ports[13];
+
+	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+	KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
+
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
+
+	port = &host->ports[8];
+	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &host->ports[3];
+	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev1->ports[1];
+	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev1->ports[3];
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev1->ports[5];
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev1->ports[7];
+	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev3->ports[1];
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev5->ports[1];
+	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	port = &dev4->ports[1];
+	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+	tb_tunnel_free(dp_tunnel);
+}
+
+static struct kunit_case tb_test_cases[] = {
+	KUNIT_CASE(tb_test_path_basic),
+	KUNIT_CASE(tb_test_path_not_connected_walk),
+	KUNIT_CASE(tb_test_path_single_hop_walk),
+	KUNIT_CASE(tb_test_path_daisy_chain_walk),
+	KUNIT_CASE(tb_test_path_simple_tree_walk),
+	KUNIT_CASE(tb_test_path_complex_tree_walk),
+	KUNIT_CASE(tb_test_path_max_length_walk),
+	KUNIT_CASE(tb_test_path_not_connected),
+	KUNIT_CASE(tb_test_path_not_bonded_lane0),
+	KUNIT_CASE(tb_test_path_not_bonded_lane1),
+	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
+	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
+	KUNIT_CASE(tb_test_path_mixed_chain),
+	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
+	KUNIT_CASE(tb_test_tunnel_pcie),
+	KUNIT_CASE(tb_test_tunnel_dp),
+	KUNIT_CASE(tb_test_tunnel_dp_chain),
+	KUNIT_CASE(tb_test_tunnel_dp_tree),
+	KUNIT_CASE(tb_test_tunnel_dp_max_length),
+	KUNIT_CASE(tb_test_tunnel_port_on_path),
+	KUNIT_CASE(tb_test_tunnel_usb3),
+	{ }
+};
+
+static struct kunit_suite tb_test_suite = {
+	.name = "thunderbolt",
+	.test_cases = tb_test_cases,
+};
+
+static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
+
+int tb_test_init(void)
+{
+	return __kunit_test_suites_init(tb_test_suites);
+}
+
+void tb_test_exit(void)
+{
+	return __kunit_test_suites_exit(tb_test_suites);
+}
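+/*
+ * A minimal .kunitconfig sketch for running this suite (assuming the
+ * Kconfig symbols introduced by this change; additional options such
+ * as CONFIG_PCI may be required depending on the configuration):
+ *
+ *	CONFIG_KUNIT=y
+ *	CONFIG_USB4=y
+ *	CONFIG_USB4_KUNIT_TEST=y
+ */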
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 0000000..039c42a
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	    Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+	bool root_switch = !tb_route(sw);
+
+	switch (sw->tmu.rate) {
+	case TB_SWITCH_TMU_RATE_OFF:
+		return "off";
+
+	case TB_SWITCH_TMU_RATE_HIFI:
+		/* Root switch does not have upstream directionality */
+		if (root_switch)
+			return "HiFi";
+		if (sw->tmu.unidirectional)
+			return "uni-directional, HiFi";
+		return "bi-directional, HiFi";
+
+	case TB_SWITCH_TMU_RATE_NORMAL:
+		if (root_switch)
+			return "normal";
+		return "uni-directional, normal";
+
+	default:
+		return "unknown";
+	}
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_0, 1);
+	if (ret)
+		return false;
+
+	return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_3, 1);
+	if (ret)
+		return ret;
+
+	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+	return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_3, 1);
+	if (ret)
+		return ret;
+
+	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+			     u32 value)
+{
+	u32 data;
+	int ret;
+
+	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+	if (ret)
+		return ret;
+
+	data &= ~mask;
+	data |= value;
+
+	return tb_port_write(port, &data, TB_CFG_PORT,
+			     port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+					  bool unidirectional)
+{
+	u32 val;
+
+	if (!port->sw->tmu.has_ucap)
+		return 0;
+
+	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+	return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_tmu + TMU_ADP_CS_3, 1);
+	if (ret)
+		return false;
+
+	return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_0, 1);
+	if (ret)
+		return ret;
+
+	if (set)
+		val |= TMU_RTR_CS_0_TD;
+	else
+		val &= ~TMU_RTR_CS_0_TD;
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions so
+ * that the internal structures are filled in correctly. It does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	int ret;
+
+	if (tb_switch_is_icm(sw))
+		return 0;
+
+	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+	if (ret > 0)
+		sw->tmu.cap = ret;
+
+	tb_switch_for_each_port(sw, port) {
+		int cap;
+
+		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+		if (cap > 0)
+			port->cap_tmu = cap;
+	}
+
+	ret = tb_switch_tmu_rate_read(sw);
+	if (ret < 0)
+		return ret;
+
+	sw->tmu.rate = ret;
+
+	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+	if (sw->tmu.has_ucap) {
+		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+		if (tb_route(sw)) {
+			struct tb_port *up = tb_upstream_port(sw);
+
+			sw->tmu.unidirectional =
+				tb_port_tmu_is_unidirectional(up);
+		}
+	} else {
+		sw->tmu.unidirectional = false;
+	}
+
+	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+	return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates the switch local time using the time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+	unsigned int post_local_time_offset, post_time_offset;
+	struct tb_switch *root_switch = sw->tb->root_switch;
+	u64 hi, mid, lo, local_time, post_time;
+	int i, ret, retries = 100;
+	u32 gm_local_time[3];
+
+	if (!tb_route(sw))
+		return 0;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	/* Need to be able to read the grandmaster time */
+	if (!root_switch->tmu.cap)
+		return 0;
+
+	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+			 root_switch->tmu.cap + TMU_RTR_CS_1,
+			 ARRAY_SIZE(gm_local_time));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+			  gm_local_time[i]);
+
+	/* Convert to nanoseconds (drop fractional part) */
+	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+	mid = gm_local_time[1];
+	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+	local_time = hi << 48 | mid << 16 | lo;
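+	/*
+	 * The three config words form one fixed-point timestamp: the
+	 * low 16 bits of gm_local_time[0] are the fractional part
+	 * (dropped above) and the remaining bits are reassembled into
+	 * whole nanoseconds. For example hi = 0x1, mid = 0x23456789
+	 * and lo = 0xabcd yield local_time = 0x123456789abcd ns.
+	 */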
+
+	/* Tell the switch that time sync is disrupted for a while */
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+	/*
+	 * Write the Grandmaster time to the Post Local Time registers
+	 * of the new switch.
+	 */
+	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+			  post_local_time_offset, 2);
+	if (ret)
+		goto out;
+
+	/*
+	 * Have the new switch update its local time (by writing 1 to
+	 * the post_time registers) and wait for it to complete (the
+	 * post_time register reads back 0), which means the time has
+	 * converged properly.
+	 */
+	post_time = 1;
+
+	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+	if (ret)
+		goto out;
+
+	do {
+		usleep_range(5, 10);
+		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+				 post_time_offset, 2);
+		if (ret)
+			goto out;
+	} while (--retries && post_time);
+
+	if (!retries) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+	tb_switch_tmu_set_time_disruption(sw, false);
+	return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off the TMU of @sw if it is enabled. If it is not, this does
+ * nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+	int ret;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	/* Already disabled? */
+	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+		return 0;
+
+	if (sw->tmu.unidirectional) {
+		struct tb_switch *parent = tb_switch_parent(sw);
+		struct tb_port *up, *down;
+
+		up = tb_upstream_port(sw);
+		down = tb_port_at(tb_route(sw), parent);
+
+		/* The switch may be unplugged so ignore any errors */
+		tb_port_tmu_unidirectional_disable(up);
+		ret = tb_port_tmu_unidirectional_disable(down);
+		if (ret)
+			return ret;
+	}
+
+	tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+	sw->tmu.unidirectional = false;
+	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+	tb_sw_dbg(sw, "TMU: disabled\n");
+	return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables the TMU of a switch in bi-directional, HiFi mode. In this
+ * mode all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+	int ret;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	if (tb_switch_tmu_is_enabled(sw))
+		return 0;
+
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	/* Change mode to bi-directional */
+	if (tb_route(sw) && sw->tmu.unidirectional) {
+		struct tb_switch *parent = tb_switch_parent(sw);
+		struct tb_port *up, *down;
+
+		up = tb_upstream_port(sw);
+		down = tb_port_at(tb_route(sw), parent);
+
+		ret = tb_port_tmu_unidirectional_disable(down);
+		if (ret)
+			return ret;
+
+		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+		if (ret)
+			return ret;
+
+		ret = tb_port_tmu_unidirectional_disable(up);
+		if (ret)
+			return ret;
+	} else {
+		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+		if (ret)
+			return ret;
+	}
+
+	sw->tmu.unidirectional = false;
+	sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+	return tb_switch_tmu_set_time_disruption(sw, false);
+}
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 5a99234..829b6cc 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2019, Intel Corporation
  */
 
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 
@@ -18,6 +19,12 @@
 #define TB_PCI_PATH_DOWN		0
 #define TB_PCI_PATH_UP			1
 
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID			8
+
+#define TB_USB3_PATH_DOWN		0
+#define TB_USB3_PATH_UP			1
+
 /* DP adapters use HopID 8 for AUX and 9 for Video */
 #define TB_DP_AUX_TX_HOPID		8
 #define TB_DP_AUX_RX_HOPID		8
@@ -30,7 +37,7 @@
 #define TB_DMA_PATH_OUT			0
 #define TB_DMA_PATH_IN			1
 
-static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
 
 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
 	do {                                                            \
@@ -90,6 +97,22 @@
 	return 0;
 }
 
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+	/* If the path is complete, sw is not NULL */
+	if (sw) {
+		/* More credits for faster link */
+		switch (sw->link_speed * sw->link_width) {
+		case 40:
+			return 32;
+		case 20:
+			return 24;
+		}
+	}
+
+	return 16;
+}
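+
+/*
+ * A worked reading of the switch statement above, assuming
+ * sw->link_speed is in Gb/s per lane and sw->link_width counts lanes:
+ *
+ *	20 Gb/s x 2 lanes (40)               -> 32 credits
+ *	10 Gb/s x 2 or 20 Gb/s x 1 lane (20) -> 24 credits
+ *	anything else                        -> 16 credits
+ */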
+
 static void tb_pci_init_path(struct tb_path *path)
 {
 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +124,9 @@
 	path->drop_packages = 0;
 	path->nfc_credits = 0;
 	path->hops[0].initial_credits = 7;
-	path->hops[1].initial_credits = 16;
+	if (path->path_length > 1)
+		path->hops[1].initial_credits =
+			tb_initial_credits(path->hops[1].in_port->sw);
 }
 
 /**
@@ -225,12 +250,180 @@
 	return tunnel;
 }
 
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+	/* Titan Ridge DP adapters need the same treatment as USB4 */
+	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+	int timeout = 10;
+	u32 val;
+	int ret;
+
+	/* Both ends need to support this */
+	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
+		return 0;
+
+	ret = tb_port_read(out, &val, TB_CFG_PORT,
+			   out->cap_adap + DP_STATUS_CTRL, 1);
+	if (ret)
+		return ret;
+
+	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+	ret = tb_port_write(out, &val, TB_CFG_PORT,
+			    out->cap_adap + DP_STATUS_CTRL, 1);
+	if (ret)
+		return ret;
+
+	do {
+		ret = tb_port_read(out, &val, TB_CFG_PORT,
+				   out->cap_adap + DP_STATUS_CTRL, 1);
+		if (ret)
+			return ret;
+		if (!(val & DP_STATUS_CTRL_CMHS))
+			return 0;
+		usleep_range(10, 100);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+	switch (rate) {
+	case DP_COMMON_CAP_RATE_RBR:
+		return 1620;
+	case DP_COMMON_CAP_RATE_HBR:
+		return 2700;
+	case DP_COMMON_CAP_RATE_HBR2:
+		return 5400;
+	case DP_COMMON_CAP_RATE_HBR3:
+		return 8100;
+	default:
+		return 0;
+	}
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+	val &= ~DP_COMMON_CAP_RATE_MASK;
+	switch (rate) {
+	default:
+		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
+		fallthrough;
+	case 1620:
+		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+		break;
+	case 2700:
+		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+		break;
+	case 5400:
+		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+		break;
+	case 8100:
+		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+		break;
+	}
+	return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+	switch (lanes) {
+	case DP_COMMON_CAP_1_LANE:
+		return 1;
+	case DP_COMMON_CAP_2_LANES:
+		return 2;
+	case DP_COMMON_CAP_4_LANES:
+		return 4;
+	default:
+		return 0;
+	}
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+	val &= ~DP_COMMON_CAP_LANES_MASK;
+	switch (lanes) {
+	default:
+		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+		     lanes);
+		fallthrough;
+	case 1:
+		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+		break;
+	case 2:
+		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+		break;
+	case 4:
+		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+		break;
+	}
+	return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+	/* Tunneling removes the DP 8b/10b encoding */
+	return rate * lanes * 8 / 10;
+}
+
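+/*
+ * A quick check of the formula above: HBR2 at 5400 Mb/s over two lanes
+ * carries 5400 * 2 * 8 / 10 = 8640 Mb/s once the 8b/10b encoding
+ * overhead is removed, matching the { 5400, 2 } entry in the table
+ * below.
+ */
+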
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+				  u32 out_rate, u32 out_lanes, u32 *new_rate,
+				  u32 *new_lanes)
+{
+	static const u32 dp_bw[][2] = {
+		/* Mb/s, lanes */
+		{ 8100, 4 }, /* 25920 Mb/s */
+		{ 5400, 4 }, /* 17280 Mb/s */
+		{ 8100, 2 }, /* 12960 Mb/s */
+		{ 2700, 4 }, /* 8640 Mb/s */
+		{ 5400, 2 }, /* 8640 Mb/s */
+		{ 8100, 1 }, /* 6480 Mb/s */
+		{ 1620, 4 }, /* 5184 Mb/s */
+		{ 5400, 1 }, /* 4320 Mb/s */
+		{ 2700, 2 }, /* 4320 Mb/s */
+		{ 1620, 2 }, /* 2592 Mb/s */
+		{ 2700, 1 }, /* 2160 Mb/s */
+		{ 1620, 1 }, /* 1296 Mb/s */
+	};
+	unsigned int i;
+
+	/*
+	 * Find a combination that can fit into max_bw and does not
+	 * exceed the maximum rate and lanes supported by the DP OUT and
+	 * DP IN adapters.
+	 */
+	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+			continue;
+
+		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+			continue;
+
+		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+			*new_rate = dp_bw[i][0];
+			*new_lanes = dp_bw[i][1];
+			return 0;
+		}
+	}
+
+	return -ENOSR;
+}
+
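+/*
+ * A hypothetical example of the selection: with max_bw = 9000 Mb/s,
+ * DP IN capable of 8100 Mb/s x 4 and DP OUT capable of 5400 Mb/s x 2,
+ * the first table entry that satisfies both adapters is { 5400, 2 },
+ * giving 8640 Mb/s <= 9000 Mb/s, so *new_rate = 5400 and
+ * *new_lanes = 2.
+ */
+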
 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 {
+	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
 	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
-	u32 in_dp_cap, out_dp_cap;
-	int ret;
+	int ret, max_bw;
 
 	/*
 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
@@ -239,25 +432,76 @@
 	if (in->sw->generation < 2 || out->sw->generation < 2)
 		return 0;
 
+	/*
+	 * Perform the connection manager handshake between the IN and
+	 * OUT ports before the capabilities exchange can take place.
+	 */
+	ret = tb_dp_cm_handshake(in, out);
+	if (ret)
+		return ret;
+
 	/* Read both DP_LOCAL_CAP registers */
 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
-			   in->cap_adap + TB_DP_LOCAL_CAP, 1);
+			   in->cap_adap + DP_LOCAL_CAP, 1);
 	if (ret)
 		return ret;
 
 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
-			   out->cap_adap + TB_DP_LOCAL_CAP, 1);
+			   out->cap_adap + DP_LOCAL_CAP, 1);
 	if (ret)
 		return ret;
 
 	/* Write IN local caps to OUT remote caps */
 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
-			    out->cap_adap + TB_DP_REMOTE_CAP, 1);
+			    out->cap_adap + DP_REMOTE_CAP, 1);
 	if (ret)
 		return ret;
 
+	in_rate = tb_dp_cap_get_rate(in_dp_cap);
+	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+	/*
+	 * If the tunnel bandwidth is limited (max_bw is set), see if we
+	 * need to reduce the bandwidth to make it fit.
+	 */
+	out_rate = tb_dp_cap_get_rate(out_dp_cap);
+	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+	bw = tb_dp_bandwidth(out_rate, out_lanes);
+	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+		    out_rate, out_lanes, bw);
+
+	if (in->sw->config.depth < out->sw->config.depth)
+		max_bw = tunnel->max_down;
+	else
+		max_bw = tunnel->max_up;
+
+	if (max_bw && bw > max_bw) {
+		u32 new_rate, new_lanes, new_bw;
+
+		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
+					     out_rate, out_lanes, &new_rate,
+					     &new_lanes);
+		if (ret) {
+			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+			return ret;
+		}
+
+		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+			    new_rate, new_lanes, new_bw);
+
+		/*
+		 * Set new rate and number of lanes before writing it to
+		 * the IN port remote caps.
+		 */
+		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+	}
+
 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
-			     in->cap_adap + TB_DP_REMOTE_CAP, 1);
+			     in->cap_adap + DP_REMOTE_CAP, 1);
 }
 
 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
@@ -297,6 +541,67 @@
 	return 0;
 }
 
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+				    int *consumed_down)
+{
+	struct tb_port *in = tunnel->src_port;
+	const struct tb_switch *sw = in->sw;
+	u32 val, rate = 0, lanes = 0;
+	int ret;
+
+	if (tb_dp_is_usb4(sw)) {
+		int timeout = 20;
+
+		/*
+		 * Wait for DPRX done. Normally it should already be set
+		 * for an active tunnel.
+		 */
+		do {
+			ret = tb_port_read(in, &val, TB_CFG_PORT,
+					   in->cap_adap + DP_COMMON_CAP, 1);
+			if (ret)
+				return ret;
+
+			if (val & DP_COMMON_CAP_DPRX_DONE) {
+				rate = tb_dp_cap_get_rate(val);
+				lanes = tb_dp_cap_get_lanes(val);
+				break;
+			}
+			msleep(250);
+		} while (timeout--);
+
+		if (!timeout)
+			return -ETIMEDOUT;
+	} else if (sw->generation >= 2) {
+		/*
+		 * Read from the copied remote cap so that we take into
+		 * account if capabilities were reduced during exchange.
+		 */
+		ret = tb_port_read(in, &val, TB_CFG_PORT,
+				   in->cap_adap + DP_REMOTE_CAP, 1);
+		if (ret)
+			return ret;
+
+		rate = tb_dp_cap_get_rate(val);
+		lanes = tb_dp_cap_get_lanes(val);
+	} else {
+		/* No bandwidth management for legacy devices */
+		*consumed_up = 0;
+		*consumed_down = 0;
+		return 0;
+	}
+
+	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+		*consumed_up = 0;
+		*consumed_down = tb_dp_bandwidth(rate, lanes);
+	} else {
+		*consumed_up = tb_dp_bandwidth(rate, lanes);
+		*consumed_down = 0;
+	}
+
+	return 0;
+}
+
 static void tb_dp_init_aux_path(struct tb_path *path)
 {
 	int i;
@@ -324,12 +629,12 @@
 	path->weight = 1;
 
 	if (discover) {
-		path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 	} else {
 		u32 max_credits;
 
-		max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
-			TB_PORT_MAX_CREDITS_SHIFT;
+		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 		/* Leave some credits for AUX path */
 		path->nfc_credits = min(max_credits - 2, 12U);
 	}
@@ -361,6 +666,7 @@
 
 	tunnel->init = tb_dp_xchg_caps;
 	tunnel->activate = tb_dp_activate;
+	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 	tunnel->src_port = in;
 
 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
@@ -419,6 +725,10 @@
  * @tb: Pointer to the domain structure
  * @in: DP in adapter port
  * @out: DP out adapter port
+ * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
+ *	    if not limited)
+ * @max_down: Maximum available downstream bandwidth for the DP tunnel
+ *	      (%0 if not limited)
  *
  * Allocates a tunnel between @in and @out that is capable of tunneling
  * Display Port traffic.
@@ -426,7 +736,8 @@
  * Return: Returns a tb_tunnel on success or NULL on failure.
  */
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
-				     struct tb_port *out)
+				     struct tb_port *out, int max_up,
+				     int max_down)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path **paths;
@@ -441,8 +752,11 @@
 
 	tunnel->init = tb_dp_xchg_caps;
 	tunnel->activate = tb_dp_activate;
+	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 	tunnel->src_port = in;
 	tunnel->dst_port = out;
+	tunnel->max_up = max_up;
+	tunnel->max_down = max_down;
 
 	paths = tunnel->paths;
 
@@ -478,8 +792,8 @@
 {
 	u32 max_credits;
 
-	max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
-		TB_PORT_MAX_CREDITS_SHIFT;
+	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 	return min(max_credits, 13U);
 }
 
@@ -562,6 +876,335 @@
 	return tunnel;
 }
 
+static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
+{
+	int ret, up_max_rate, down_max_rate;
+
+	ret = usb4_usb3_port_max_link_rate(up);
+	if (ret < 0)
+		return ret;
+	up_max_rate = ret;
+
+	ret = usb4_usb3_port_max_link_rate(down);
+	if (ret < 0)
+		return ret;
+	down_max_rate = ret;
+
+	return min(up_max_rate, down_max_rate);
+}
+
+static int tb_usb3_init(struct tb_tunnel *tunnel)
+{
+	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
+		      tunnel->allocated_up, tunnel->allocated_down);
+
+	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
+						 &tunnel->allocated_up,
+						 &tunnel->allocated_down);
+}
+
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+	int res;
+
+	res = tb_usb3_port_enable(tunnel->src_port, activate);
+	if (res)
+		return res;
+
+	if (tb_port_is_usb3_up(tunnel->dst_port))
+		return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+	return 0;
+}
+
+static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+		int *consumed_up, int *consumed_down)
+{
+	/*
+	 * PCIe tunneling affects the USB3 bandwidth so take that
+	 * into account here.
+	 */
+	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
+	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
+	return 0;
+}
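+
+/*
+ * Worked example of the scaling above (illustrative numbers): with
+ * 900 Mb/s allocated in each direction the tunnel reports
+ *
+ *	900 * (3 + 1) / 3 = 1200 Mb/s
+ *
+ * consumed per direction. The 3 is the USB3 path weight set in
+ * tb_usb3_init_path(); the extra 1 is assumed headroom for PCIe
+ * traffic sharing the link.
+ */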
+
+static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
+{
+	int ret;
+
+	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
+					       &tunnel->allocated_up,
+					       &tunnel->allocated_down);
+	if (ret)
+		return ret;
+
+	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
+		      tunnel->allocated_up, tunnel->allocated_down);
+	return 0;
+}
+
+static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+						int *available_up,
+						int *available_down)
+{
+	int ret, max_rate, allocate_up, allocate_down;
+
+	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
+	if (ret < 0) {
+		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
+		return;
+	} else if (!ret) {
+		/* Use the maximum link rate if the link valid bit is not set */
+		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+		if (ret < 0) {
+			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
+			return;
+		}
+	}
+
+	/*
+	 * 90% of the max rate can be allocated for isochronous
+	 * transfers.
+	 */
+	max_rate = ret * 90 / 100;
+
+	/* No need to reclaim if already at maximum */
+	if (tunnel->allocated_up >= max_rate &&
+	    tunnel->allocated_down >= max_rate)
+		return;
+
+	/* Don't go lower than what is already allocated */
+	allocate_up = min(max_rate, *available_up);
+	if (allocate_up < tunnel->allocated_up)
+		allocate_up = tunnel->allocated_up;
+
+	allocate_down = min(max_rate, *available_down);
+	if (allocate_down < tunnel->allocated_down)
+		allocate_down = tunnel->allocated_down;
+
+	/* If no changes no need to do more */
+	if (allocate_up == tunnel->allocated_up &&
+	    allocate_down == tunnel->allocated_down)
+		return;
+
+	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
+						&allocate_down);
+	if (ret) {
+		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
+		return;
+	}
+
+	tunnel->allocated_up = allocate_up;
+	*available_up -= tunnel->allocated_up;
+
+	tunnel->allocated_down = allocate_down;
+	*available_down -= tunnel->allocated_down;
+
+	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
+		      tunnel->allocated_up, tunnel->allocated_down);
+}
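+
+/*
+ * Worked example of the reclaim logic above (illustrative numbers): on
+ * a 10000 Mb/s link, max_rate = 10000 * 90 / 100 = 9000 Mb/s. With
+ * 1000/1000 Mb/s currently allocated and 5000/3000 Mb/s available, the
+ * tunnel allocates min(9000, 5000) = 5000 up and min(9000, 3000) = 3000
+ * down, and the available counters drop to 0/0.
+ */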
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 3;
+	path->weight = 3;
+	path->drop_packages = 0;
+	path->nfc_credits = 0;
+	path->hops[0].initial_credits = 7;
+	if (path->path_length > 1)
+		path->hops[1].initial_credits =
+			tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+
+	if (!tb_usb3_port_is_enabled(down))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_usb3_activate;
+	tunnel->src_port = down;
+
+	/*
+	 * Discover both paths even if they are not complete. We will
+	 * clean them up by calling tb_tunnel_deactivate() below in that
+	 * case.
+	 */
+	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+				&tunnel->dst_port, "USB3 Down");
+	if (!path) {
+		/* Just disable the downstream port */
+		tb_usb3_port_enable(down, false);
+		goto err_free;
+	}
+	tunnel->paths[TB_USB3_PATH_DOWN] = path;
+	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+				"USB3 Up");
+	if (!path)
+		goto err_deactivate;
+	tunnel->paths[TB_USB3_PATH_UP] = path;
+	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+	/* Validate that the tunnel is complete */
+	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+		tb_port_warn(tunnel->dst_port,
+			     "path does not end on a USB3 adapter, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (down != tunnel->src_port) {
+		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+		tb_tunnel_warn(tunnel,
+			       "tunnel is not fully activated, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (!tb_route(down->sw)) {
+		int ret;
+
+		/*
+		 * Read the initial bandwidth allocation for the first
+		 * hop tunnel.
+		 */
+		ret = usb4_usb3_port_allocated_bandwidth(down,
+			&tunnel->allocated_up, &tunnel->allocated_down);
+		if (ret)
+			goto err_deactivate;
+
+		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
+			      tunnel->allocated_up, tunnel->allocated_down);
+
+		tunnel->init = tb_usb3_init;
+		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
+		tunnel->release_unused_bandwidth =
+			tb_usb3_release_unused_bandwidth;
+		tunnel->reclaim_available_bandwidth =
+			tb_usb3_reclaim_available_bandwidth;
+	}
+
+	tb_tunnel_dbg(tunnel, "discovered\n");
+	return tunnel;
+
+err_deactivate:
+	tb_tunnel_deactivate(tunnel);
+err_free:
+	tb_tunnel_free(tunnel);
+
+	return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
+ *	    if not limited).
+ * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
+ *	      (%0 if not limited).
+ *
+ * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
+ * @TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+				       struct tb_port *down, int max_up,
+				       int max_down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+	int max_rate = 0;
+
+	/*
+	 * Check that we have enough bandwidth available for the new
+	 * USB3 tunnel.
+	 */
+	if (max_up > 0 || max_down > 0) {
+		max_rate = tb_usb3_max_link_rate(down, up);
+		if (max_rate < 0)
+			return NULL;
+
+		/* Only 90% can be allocated for USB3 isochronous transfers */
+		max_rate = max_rate * 90 / 100;
+		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
+			    max_rate);
+
+		if (max_rate > max_up || max_rate > max_down) {
+			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
+			return NULL;
+		}
+	}
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_usb3_activate;
+	tunnel->src_port = down;
+	tunnel->dst_port = up;
+	tunnel->max_up = max_up;
+	tunnel->max_down = max_down;
+
+	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+			     "USB3 Down");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_usb3_init_path(path);
+	tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+			     "USB3 Up");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_usb3_init_path(path);
+	tunnel->paths[TB_USB3_PATH_UP] = path;
+
+	if (!tb_route(down->sw)) {
+		tunnel->allocated_up = max_rate;
+		tunnel->allocated_down = max_rate;
+
+		tunnel->init = tb_usb3_init;
+		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
+		tunnel->release_unused_bandwidth =
+			tb_usb3_release_unused_bandwidth;
+		tunnel->reclaim_available_bandwidth =
+			tb_usb3_reclaim_available_bandwidth;
+	}
+
+	return tunnel;
+}
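+
+/*
+ * Usage sketch (illustrative only, not part of this change): a
+ * connection manager allocating and bringing up a USB3 tunnel. The
+ * @up/@down ports and the 4000 Mb/s limits are assumptions for the
+ * example.
+ */
+#if 0
+	struct tb_tunnel *tunnel;
+
+	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 4000, 4000);
+	if (!tunnel)
+		return -ENOMEM;
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_tunnel_free(tunnel);
+		return -EIO;
+	}
+#endif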
+
 /**
  * tb_tunnel_free() - free a tunnel
  * @tunnel: Tunnel to be freed
@@ -689,3 +1332,129 @@
 			tb_path_deactivate(tunnel->paths[i]);
 	}
 }
+
+/**
+ * tb_tunnel_port_on_path() - Does the tunnel go through port
+ * @tunnel: Tunnel to check
+ * @port: Port to check
+ *
+ * Returns true if @tunnel goes through @port (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
+			    const struct tb_port *port)
+{
+	int i;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (!tunnel->paths[i])
+			continue;
+
+		if (tb_path_port_on_path(tunnel->paths[i], port))
+			return true;
+	}
+
+	return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+	int i;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (!tunnel->paths[i])
+			return false;
+		if (!tunnel->paths[i]->activated)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
+ *		 Can be %NULL.
+ * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
+ *		   Can be %NULL.
+ *
+ * Stores the amount of isochronous bandwidth @tunnel consumes in
+ * @consumed_up and @consumed_down. In case of success returns %0,
+ * negative errno otherwise.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+				 int *consumed_down)
+{
+	int up_bw = 0, down_bw = 0;
+
+	if (!tb_tunnel_is_active(tunnel))
+		goto out;
+
+	if (tunnel->consumed_bandwidth) {
+		int ret;
+
+		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
+		if (ret)
+			return ret;
+
+		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
+			      down_bw);
+	}
+
+out:
+	if (consumed_up)
+		*consumed_up = up_bw;
+	if (consumed_down)
+		*consumed_down = down_bw;
+
+	return 0;
+}
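+
+/*
+ * Sketch (illustrative): totaling consumed bandwidth over a list of
+ * tunnels. The tunnel_list head is an assumption for the example; the
+ * real connection manager keeps its own list.
+ */
+#if 0
+	struct tb_tunnel *tunnel;
+	int up, down, total_up = 0, total_down = 0;
+
+	list_for_each_entry(tunnel, &tunnel_list, list) {
+		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
+			continue;
+		total_up += up;
+		total_down += down;
+	}
+#endif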
+
+/**
+ * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
+ * @tunnel: Tunnel whose unused bandwidth to release
+ *
+ * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
+ * the moment) this function makes it release all the unused bandwidth.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
+{
+	if (!tb_tunnel_is_active(tunnel))
+		return 0;
+
+	if (tunnel->release_unused_bandwidth) {
+		int ret;
+
+		ret = tunnel->release_unused_bandwidth(tunnel);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
+ * @tunnel: Tunnel reclaiming available bandwidth
+ * @available_up: Available upstream bandwidth (in Mb/s)
+ * @available_down: Available downstream bandwidth (in Mb/s)
+ *
+ * Reclaims bandwidth from @available_up and @available_down and updates
+ * the variables accordingly (e.g. decreases both according to what was
+ * reclaimed by the tunnel). If nothing was reclaimed the values are
+ * kept as is.
+ */
+void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+					   int *available_up,
+					   int *available_down)
+{
+	if (!tb_tunnel_is_active(tunnel))
+		return;
+
+	if (tunnel->reclaim_available_bandwidth)
+		tunnel->reclaim_available_bandwidth(tunnel, available_up,
+						    available_down);
+}
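+
+/*
+ * Sketch (illustrative): the two helpers above are meant to be used as
+ * a pair when rebalancing. First every tunnel releases what it does not
+ * use, then each tunnel reclaims from the remaining pool. The list and
+ * the available_up/available_down variables are assumptions.
+ */
+#if 0
+	list_for_each_entry(tunnel, &tunnel_list, list)
+		tb_tunnel_release_unused_bandwidth(tunnel);
+
+	list_for_each_entry(tunnel, &tunnel_list, list)
+		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
+						      &available_down);
+#endif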
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index c68bbcd..1d2a64e 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -15,6 +15,7 @@
 	TB_TUNNEL_PCI,
 	TB_TUNNEL_DP,
 	TB_TUNNEL_DMA,
+	TB_TUNNEL_USB3,
 };
 
 /**
@@ -27,8 +28,17 @@
  * @npaths: Number of paths in @paths
  * @init: Optional tunnel specific initialization
  * @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
+ * @release_unused_bandwidth: Release all unused bandwidth
+ * @reclaim_available_bandwidth: Reclaim back available bandwidth
  * @list: Tunnels are linked using this field
  * @type: Type of the tunnel
+ * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
+ *	    Only set if the bandwidth needs to be limited.
+ * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
+ *	      Only set if the bandwidth needs to be limited.
+ * @allocated_up: Allocated upstream bandwidth (only for USB3)
+ * @allocated_down: Allocated downstream bandwidth (only for USB3)
  */
 struct tb_tunnel {
 	struct tb *tb;
@@ -38,8 +48,18 @@
 	size_t npaths;
 	int (*init)(struct tb_tunnel *tunnel);
 	int (*activate)(struct tb_tunnel *tunnel, bool activate);
+	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
+				  int *consumed_down);
+	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
+	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
+					    int *available_up,
+					    int *available_down);
 	struct list_head list;
 	enum tb_tunnel_type type;
+	int max_up;
+	int max_down;
+	int allocated_up;
+	int allocated_down;
 };
 
 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
@@ -47,17 +67,30 @@
 				      struct tb_port *down);
 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
-				     struct tb_port *out);
+				     struct tb_port *out, int max_up,
+				     int max_down);
 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      struct tb_port *dst, int transmit_ring,
 				      int transmit_path, int receive_ring,
 				      int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+				       struct tb_port *down, int max_up,
+				       int max_down);
 
 void tb_tunnel_free(struct tb_tunnel *tunnel);
 int tb_tunnel_activate(struct tb_tunnel *tunnel);
 int tb_tunnel_restart(struct tb_tunnel *tunnel);
 void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
+			    const struct tb_port *port);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+				 int *consumed_down);
+int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
+void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+					   int *available_up,
+					   int *available_down);
 
 static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
 {
@@ -74,5 +107,10 @@
 	return tunnel->type == TB_TUNNEL_DMA;
 }
 
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+	return tunnel->type == TB_TUNNEL_USB3;
+}
+
 #endif
 
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 0000000..c05ec6f
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	    Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "sb_regs.h"
+#include "tb.h"
+
+#define USB4_DATA_DWORDS		16
+#define USB4_DATA_RETRIES		3
+
+enum usb4_switch_op {
+	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+	USB4_SWITCH_OP_NVM_WRITE = 0x20,
+	USB4_SWITCH_OP_NVM_AUTH = 0x21,
+	USB4_SWITCH_OP_NVM_READ = 0x22,
+	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+	USB4_SWITCH_OP_DROM_READ = 0x24,
+	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+enum usb4_sb_target {
+	USB4_SB_TARGET_ROUTER,
+	USB4_SB_TARGET_PARTNER,
+	USB4_SB_TARGET_RETIMER,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT	2
+#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT	24
+
+#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT		2
+#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT		15
+
+#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
+
+typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(void *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+				    u32 value, int timeout_msec)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+	do {
+		u32 val;
+		int ret;
+
+		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			return ret;
+
+		if ((val & bit) == value)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+				    size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+				     size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_do_read_data(u16 address, void *buf, size_t size,
+			     read_block_fn read_block, void *read_block_data)
+{
+	unsigned int retries = USB4_DATA_RETRIES;
+	unsigned int offset;
+
+	do {
+		unsigned int dwaddress, dwords;
+		u8 data[USB4_DATA_DWORDS * 4];
+		size_t nbytes;
+		int ret;
+
+		offset = address & 3;
+		nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
+		dwaddress = address / 4;
+		dwords = ALIGN(nbytes, 4) / 4;
+
+		ret = read_block(read_block_data, dwaddress, data, dwords);
+		if (ret) {
+			if (ret != -ENODEV && retries--)
+				continue;
+			return ret;
+		}
+
+		nbytes -= offset;
+		memcpy(buf, data + offset, nbytes);
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
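+
+/*
+ * Worked example of the chunking above (illustrative): a read of 9
+ * bytes from byte address 2 gives offset = 2, nbytes = min(9 + 2, 64)
+ * = 11, dwaddress = 0 and dwords = ALIGN(11, 4) / 4 = 3. Three dwords
+ * (12 bytes) are read and the 9 requested bytes are copied from
+ * data + 2, so the whole transfer completes in a single block.
+ */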
+
+static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
+	write_block_fn write_next_block, void *write_block_data)
+{
+	unsigned int retries = USB4_DATA_RETRIES;
+	unsigned int offset;
+
+	offset = address & 3;
+	address = address & ~3;
+
+	do {
+		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+		u8 data[USB4_DATA_DWORDS * 4];
+		int ret;
+
+		memcpy(data + offset, buf, nbytes);
+
+		ret = write_next_block(write_block_data, data, nbytes / 4);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+	u32 val;
+	int ret;
+
+	val = opcode | ROUTER_CS_26_OV;
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+	if (ret)
+		return ret;
+
+	if (val & ROUTER_CS_26_ONS)
+		return -EOPNOTSUPP;
+
+	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+	return 0;
+}
+
+static void usb4_switch_check_wakes(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	bool wakeup = false;
+	u32 val;
+
+	if (!device_may_wakeup(&sw->dev))
+		return;
+
+	if (tb_route(sw)) {
+		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+			return;
+
+		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
+			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
+			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
+
+		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
+	}
+
+	/* Check for any connected downstream ports for USB4 wake */
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
+			continue;
+
+		if (tb_port_read(port, &val, TB_CFG_PORT,
+				 port->cap_usb4 + PORT_CS_18, 1))
+			break;
+
+		tb_port_dbg(port, "USB4 wake: %s\n",
+			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");
+
+		if (val & PORT_CS_18_WOU4S)
+			wakeup = true;
+	}
+
+	if (wakeup)
+		pm_wakeup_event(&sw->dev, 0);
+}
+
+static bool link_is_usb4(struct tb_port *port)
+{
+	u32 val;
+
+	if (!port->cap_usb4)
+		return false;
+
+	if (tb_port_read(port, &val, TB_CFG_PORT,
+			 port->cap_usb4 + PORT_CS_18, 1))
+		return false;
+
+	return !(val & PORT_CS_18_TCM);
+}
+
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB and PCIe tunneling if they can
+ * be enabled (e.g. the parent switch also supports them). If USB
+ * tunneling is not available for some reason (for example there is a
+ * Thunderbolt 3 switch upstream) then the internal xHCI controller is
+ * enabled instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+	struct tb_port *downstream_port;
+	struct tb_switch *parent;
+	bool tbt3, xhci;
+	u32 val = 0;
+	int ret;
+
+	usb4_switch_check_wakes(sw);
+
+	if (!tb_route(sw))
+		return 0;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+	if (ret)
+		return ret;
+
+	parent = tb_switch_parent(sw);
+	downstream_port = tb_port_at(tb_route(sw), parent);
+	sw->link_usb4 = link_is_usb4(downstream_port);
+	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
+
+	xhci = val & ROUTER_CS_6_HCI;
+	tbt3 = !(val & ROUTER_CS_6_TNS);
+
+	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+		val |= ROUTER_CS_5_UTO;
+		xhci = false;
+	}
+
+	/* Only enable PCIe tunneling if the parent router supports it */
+	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+		val |= ROUTER_CS_5_PTO;
+		/*
+		 * xHCI can be enabled if PCIe tunneling is supported
+		 * and the parent does not have any USB3 downstream
+		 * adapters (so we cannot do USB 3.x tunneling).
+		 */
+		if (xhci)
+			val |= ROUTER_CS_5_HCO;
+	}
+
+	/* TBT3 supported by the CM */
+	val |= ROUTER_CS_5_C3S;
+	/* Tunneling configuration is ready now */
+	val |= ROUTER_CS_5_CV;
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+					ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(void *data,
+				       unsigned int dwaddress, void *buf,
+				       size_t dwords)
+{
+	struct tb_switch *sw = data;
+	u8 status = 0;
+	u32 metadata;
+	int ret;
+
+	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+		USB4_DROM_ADDRESS_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return -EIO;
+
+	return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
+ *
+ * Uses USB4 router operations to read router DROM. For devices this
+ * should always work but for hosts it may return %-EOPNOTSUPP in which
+ * case the host router does not have a DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+			  size_t size)
+{
+	return usb4_do_read_data(address, buf, size,
+				 usb4_switch_drom_read_block, sw);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+	struct tb_port *up;
+	int ret;
+	u32 val;
+
+	up = tb_upstream_port(sw);
+	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+	if (ret)
+		return false;
+
+	return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_wake() - Enable/disable wake
+ * @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * Enables/disables router to wake up from sleep.
+ */
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+	struct tb_port *port;
+	u64 route = tb_route(sw);
+	u32 val;
+	int ret;
+
+	/*
+	 * Enable wakes coming from all USB4 downstream ports (from
+	 * child routers). For device routers do this also for the
+	 * upstream USB4 port.
+	 */
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_is_null(port))
+			continue;
+		if (!route && tb_is_upstream_port(port))
+			continue;
+		if (!port->cap_usb4)
+			continue;
+
+		ret = tb_port_read(port, &val, TB_CFG_PORT,
+				   port->cap_usb4 + PORT_CS_19, 1);
+		if (ret)
+			return ret;
+
+		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
+
+		if (flags & TB_WAKE_ON_CONNECT)
+			val |= PORT_CS_19_WOC;
+		if (flags & TB_WAKE_ON_DISCONNECT)
+			val |= PORT_CS_19_WOD;
+		if (flags & TB_WAKE_ON_USB4)
+			val |= PORT_CS_19_WOU4;
+
+		ret = tb_port_write(port, &val, TB_CFG_PORT,
+				    port->cap_usb4 + PORT_CS_19, 1);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Enable wakes from PCIe and USB 3.x on this router. Only
+	 * needed for device routers.
+	 */
+	if (route) {
+		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+		if (ret)
+			return ret;
+
+		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
+		if (flags & TB_WAKE_ON_USB3)
+			val |= ROUTER_CS_5_WOU;
+		if (flags & TB_WAKE_ON_PCIE)
+			val |= ROUTER_CS_5_WOP;
+
+		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Sets sleep bit for the router. Returns when the router sleep ready
+ * bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	/* Set sleep bit and wait for sleep ready to be asserted */
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	val |= ROUTER_CS_5_SLP;
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+					ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+	u32 metadata;
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+	ret = usb4_switch_op_read_metadata(sw, &metadata);
+	if (ret)
+		return ret;
+
+	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(void *data,
+	unsigned int dwaddress, void *buf, size_t dwords)
+{
+	struct tb_switch *sw = data;
+	u8 status = 0;
+	u32 metadata;
+	int ret;
+
+	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+		   USB4_NVM_READ_LENGTH_MASK;
+	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+		   USB4_NVM_READ_OFFSET_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return -EIO;
+
+	return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+			 size_t size)
+{
+	return usb4_do_read_data(address, buf, size,
+				 usb4_switch_nvm_read_block, sw);
+}
+
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+				      unsigned int address)
+{
+	u32 metadata, dwaddress;
+	u8 status = 0;
+	int ret;
+
+	dwaddress = address / 4;
+	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+		   USB4_NVM_SET_OFFSET_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+	if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
+					    size_t dwords)
+{
+	struct tb_switch *sw = data;
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_data(sw, buf, dwords);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+	if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+			  const void *buf, size_t size)
+{
+	int ret;
+
+	ret = usb4_switch_nvm_set_offset(sw, address);
+	if (ret)
+		return ret;
+
+	return usb4_do_write_data(address, buf, size,
+				  usb4_switch_nvm_write_next_block, sw);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+	u8 status = 0;
+	int ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+	if (ret)
+		return ret;
+
+	switch (status) {
+	case 0x0:
+		tb_sw_dbg(sw, "NVM authentication successful\n");
+		return 0;
+	case 0x1:
+		return -EINVAL;
+	case 0x2:
+		return -EAGAIN;
+	case 0x3:
+		return -EOPNOTSUPP;
+	default:
+		return -EIO;
+	}
+}
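+
+/*
+ * Usage sketch (illustrative only): a minimal router NVM upgrade flow
+ * built from the helpers above. The image buffer and image_size are
+ * assumptions for the example.
+ */
+#if 0
+	ret = usb4_switch_nvm_sector_size(sw);
+	if (ret < 0)
+		return ret;	/* e.g. %-EOPNOTSUPP: no NVM support */
+
+	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
+	if (ret)
+		return ret;
+
+	return usb4_switch_nvm_authenticate(sw);
+#endif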
+
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return false;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+	/*
+	 * If DP resource allocation is not supported assume it is
+	 * always available.
+	 */
+	if (ret == -EOPNOTSUPP)
+		return true;
+	else if (ret)
+		return false;
+
+	return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+	if (ret == -EOPNOTSUPP)
+		return 0;
+	else if (ret)
+		return ret;
+
+	return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+	if (ret == -EOPNOTSUPP)
+		return 0;
+	else if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+	struct tb_port *p;
+	int usb4_idx = 0;
+
+	/* Assume port is primary */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_null(p))
+			continue;
+		if (tb_is_upstream_port(p))
+			continue;
+		if (!p->link_nr) {
+			if (p == port)
+				break;
+			usb4_idx++;
+		}
+	}
+
+	return usb4_idx;
+}
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+					  const struct tb_port *port)
+{
+	int usb4_idx = usb4_port_idx(sw, port);
+	struct tb_port *p;
+	int pcie_idx = 0;
+
+	/* Find PCIe down port matching usb4_port */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_pcie_down(p))
+			continue;
+
+		if (pcie_idx == usb4_idx)
+			return p;
+
+		pcie_idx++;
+	}
+
+	return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+					  const struct tb_port *port)
+{
+	int usb4_idx = usb4_port_idx(sw, port);
+	struct tb_port *p;
+	int usb_idx = 0;
+
+	/* Find USB3 down port matching usb4_port */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_usb3_down(p))
+			continue;
+
+		if (usb_idx == usb4_idx)
+			return p;
+
+		usb_idx++;
+	}
+
+	return NULL;
+}
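+
+/*
+ * Sketch (illustrative): when a router appears behind a USB4 port the
+ * connection manager can map the port to its protocol adapters before
+ * deciding which tunnels to create. Both lookups may return %NULL.
+ */
+#if 0
+	struct tb_port *pcie_down, *usb3_down;
+
+	pcie_down = usb4_switch_map_pcie_down(sw, port);
+	usb3_down = usb4_switch_map_usb3_down(sw, port);
+#endif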
+
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+	if (ret)
+		return ret;
+
+	val &= ~ADP_CS_4_LCK;
+	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
+
+static int usb4_port_set_configured(struct tb_port *port, bool configured)
+{
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	if (configured)
+		val |= PORT_CS_19_PC;
+	else
+		val &= ~PORT_CS_19_PC;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure() - Set USB4 port configured
+ * @port: USB4 port to configure
+ *
+ * Sets the USB4 link to be configured for power management purposes.
+ */
+int usb4_port_configure(struct tb_port *port)
+{
+	return usb4_port_set_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure() - Set USB4 port unconfigured
+ * @port: USB4 port to unconfigure
+ *
+ * Sets the USB4 link to be unconfigured for power management purposes.
+ */
+void usb4_port_unconfigure(struct tb_port *port)
+{
+	usb4_port_set_configured(port, false);
+}
+
+static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
+{
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	if (configured)
+		val |= PORT_CS_19_PID;
+	else
+		val &= ~PORT_CS_19_PID;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure_xdomain() - Configure port for XDomain
+ * @port: USB4 port connected to another host
+ *
+ * Marks the USB4 port as being connected to another host. Returns %0 in
+ * case of success and negative errno in case of failure.
+ */
+int usb4_port_configure_xdomain(struct tb_port *port)
+{
+	return usb4_set_xdomain_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
+ * @port: USB4 port that was connected to another host
+ *
+ * Clears USB4 port from being marked as XDomain.
+ */
+void usb4_port_unconfigure_xdomain(struct tb_port *port)
+{
+	usb4_set_xdomain_configured(port, false);
+}
+
+static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
+				  u32 value, int timeout_msec)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+	do {
+		u32 val;
+		int ret;
+
+		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
+		if (ret)
+			return ret;
+
+		if ((val & bit) == value)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
+static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
+			    dwords);
+}
+
+static int usb4_port_write_data(struct tb_port *port, const void *data,
+				size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
+			     dwords);
+}
+
+static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
+			     u8 index, u8 reg, void *buf, u8 size)
+{
+	size_t dwords = DIV_ROUND_UP(size, 4);
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	val = reg;
+	val |= size << PORT_CS_1_LENGTH_SHIFT;
+	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
+	if (target == USB4_SB_TARGET_RETIMER)
+		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
+	val |= PORT_CS_1_PND;
+
+	ret = tb_port_write(port, &val, TB_CFG_PORT,
+			    port->cap_usb4 + PORT_CS_1, 1);
+	if (ret)
+		return ret;
+
+	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
+				     PORT_CS_1_PND, 0, 500);
+	if (ret)
+		return ret;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			    port->cap_usb4 + PORT_CS_1, 1);
+	if (ret)
+		return ret;
+
+	if (val & PORT_CS_1_NR)
+		return -ENODEV;
+	if (val & PORT_CS_1_RC)
+		return -EIO;
+
+	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
+}
+
+static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+			      u8 index, u8 reg, const void *buf, u8 size)
+{
+	size_t dwords = DIV_ROUND_UP(size, 4);
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	if (buf) {
+		ret = usb4_port_write_data(port, buf, dwords);
+		if (ret)
+			return ret;
+	}
+
+	val = reg;
+	val |= size << PORT_CS_1_LENGTH_SHIFT;
+	val |= PORT_CS_1_WNR_WRITE;
+	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
+	if (target == USB4_SB_TARGET_RETIMER)
+		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
+	val |= PORT_CS_1_PND;
+
+	ret = tb_port_write(port, &val, TB_CFG_PORT,
+			    port->cap_usb4 + PORT_CS_1, 1);
+	if (ret)
+		return ret;
+
+	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
+				     PORT_CS_1_PND, 0, 500);
+	if (ret)
+		return ret;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			    port->cap_usb4 + PORT_CS_1, 1);
+	if (ret)
+		return ret;
+
+	if (val & PORT_CS_1_NR)
+		return -ENODEV;
+	if (val & PORT_CS_1_RC)
+		return -EIO;
+
+	return 0;
+}
+
+static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
+			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
+{
+	ktime_t timeout;
+	u32 val;
+	int ret;
+
+	val = opcode;
+	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
+				 sizeof(val));
+	if (ret)
+		return ret;
+
+	timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+	do {
+		/* Check results */
+		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
+					&val, sizeof(val));
+		if (ret)
+			return ret;
+
+		switch (val) {
+		case 0:
+			return 0;
+
+		case USB4_SB_OPCODE_ERR:
+			return -EAGAIN;
+
+		case USB4_SB_OPCODE_ONS:
+			return -EOPNOTSUPP;
+
+		default:
+			if (val != opcode)
+				return -EIO;
+			break;
+		}
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * usb4_port_enumerate_retimers() - Send RT broadcast transaction
+ * @port: USB4 port
+ *
+ * This forces the USB4 port to send a broadcast RT transaction which
+ * makes the retimers on the link assign indices to themselves. Returns
+ * %0 in case of success and negative errno if there was an error.
+ */
+int usb4_port_enumerate_retimers(struct tb_port *port)
+{
+	u32 val;
+
+	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
+	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+				  USB4_SB_OPCODE, &val, sizeof(val));
+}
+
+static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
+				       enum usb4_sb_opcode opcode,
+				       int timeout_msec)
+{
+	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
+			       timeout_msec);
+}
+
+/**
+ * usb4_port_retimer_read() - Read from retimer sideband registers
+ * @port: USB4 port
+ * @index: Retimer index
+ * @reg: Sideband register to read
+ * @buf: Data from @reg is stored here
+ * @size: Number of bytes to read
+ *
+ * Reads retimer sideband registers starting from @reg. The retimer is
+ * connected to @port at @index. Returns %0 in case of success, and the
+ * read data is copied to @buf. If there is no retimer present at the
+ * given @index returns %-ENODEV. On any other failure returns negative
+ * errno.
+ */
+int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+			   u8 size)
+{
+	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
+				 size);
+}
+
+/**
+ * usb4_port_retimer_write() - Write to retimer sideband registers
+ * @port: USB4 port
+ * @index: Retimer index
+ * @reg: Sideband register to write
+ * @buf: Data that is written starting from @reg
+ * @size: Number of bytes to write
+ *
+ * Writes retimer sideband registers starting from @reg. The retimer is
+ * connected to @port at @index. Returns %0 in case of success. If there
+ * is no retimer present at the given @index returns %-ENODEV. On any
+ * other failure returns negative errno.
+ */
+int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+			    const void *buf, u8 size)
+{
+	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
+				  size);
+}
+
+/**
+ * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * If the retimer at @index is the last one (connected directly to the
+ * Type-C port) this function returns %1. If it is not, returns %0. If
+ * the retimer is not present returns %-ENODEV. Otherwise returns
+ * negative errno.
+ */
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
+{
+	u32 metadata;
+	int ret;
+
+	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+				   500);
+	if (ret)
+		return ret;
+
+	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
+				     sizeof(metadata));
+	return ret ? ret : metadata & 1;
+}
+
+/**
+ * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Reads NVM sector size (in bytes) of a retimer at @index. This
+ * operation can be used to determine whether the retimer supports NVM
+ * upgrade, for example. Returns the sector size in bytes or negative errno
+ * in case of error. Specifically returns %-ENODEV if there is no
+ * retimer at @index.
+ */
+int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
+{
+	u32 metadata;
+	int ret;
+
+	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
+				   500);
+	if (ret)
+		return ret;
+
+	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
+				     sizeof(metadata));
+	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
+					    unsigned int address)
+{
+	u32 metadata, dwaddress;
+	int ret;
+
+	dwaddress = address / 4;
+	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+		  USB4_NVM_SET_OFFSET_MASK;
+
+	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
+				      sizeof(metadata));
+	if (ret)
+		return ret;
+
+	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
+				    500);
+}
+
+struct retimer_info {
+	struct tb_port *port;
+	u8 index;
+};
+
+static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
+						  size_t dwords)
+
+{
+	const struct retimer_info *info = data;
+	struct tb_port *port = info->port;
+	u8 index = info->index;
+	int ret;
+
+	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
+				      buf, dwords * 4);
+	if (ret)
+		return ret;
+
+	return usb4_port_retimer_op(port, index,
+			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
+}
+
+/**
+ * usb4_port_retimer_nvm_write() - Write to retimer NVM
+ * @port: USB4 port
+ * @index: Retimer index
+ * @address: Byte address where to start the write
+ * @buf: Data to write
+ * @size: Number of bytes to write
+ *
+ * Writes @size bytes from @buf to the retimer NVM. Used for NVM
+ * upgrade. Returns %0 if the data was written successfully and negative
+ * errno in case of failure. Specifically returns %-ENODEV if there is
+ * no retimer at @index.
+ */
+int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
+				const void *buf, size_t size)
+{
+	struct retimer_info info = { .port = port, .index = index };
+	int ret;
+
+	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
+	if (ret)
+		return ret;
+
+	return usb4_do_write_data(address, buf, size,
+			usb4_port_retimer_nvm_write_next_block, &info);
+}
+
+/**
+ * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * After the new NVM image has been written via usb4_port_retimer_nvm_write()
+ * this function can be used to trigger the NVM upgrade process. If
+ * successful, the retimer restarts with the new NVM and may no longer
+ * have its index set, so usb4_port_enumerate_retimers() needs to be
+ * called to force a new index assignment.
+ */
+int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
+{
+	u32 val;
+
+	/*
+	 * We need to use the raw operation here because once the
+	 * authentication completes the retimer index is not set anymore,
+	 * so we cannot read the status back now.
+	 */
+	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
+	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+				  USB4_SB_OPCODE, &val, sizeof(val));
+}
+
+/**
+ * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
+ * @port: USB4 port
+ * @index: Retimer index
+ * @status: Raw status code read from metadata
+ *
+ * This can be called after usb4_port_retimer_nvm_authenticate() and
+ * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
+ *
+ * Returns %0 if the authentication status was successfully read. The
+ * completion metadata (the result) is then stored into @status. If
+ * reading the status fails, returns negative errno.
+ */
+int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
+					      u32 *status)
+{
+	u32 metadata, val;
+	int ret;
+
+	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
+				     sizeof(val));
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		*status = 0;
+		return 0;
+
+	case USB4_SB_OPCODE_ERR:
+		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
+					     &metadata, sizeof(metadata));
+		if (ret)
+			return ret;
+
+		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
+		return 0;
+
+	case USB4_SB_OPCODE_ONS:
+		return -EOPNOTSUPP;
+
+	default:
+		return -EIO;
+	}
+}
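+
+/*
+ * Usage sketch (illustrative only): the full retimer NVM upgrade
+ * sequence built from the helpers above. The image buffer, image_size
+ * and status variables are assumptions for the example.
+ */
+#if 0
+	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
+	if (ret)
+		return ret;
+
+	ret = usb4_port_retimer_nvm_authenticate(port, index);
+	if (ret)
+		return ret;
+
+	/* The retimer restarts; force index assignment and read the result */
+	ret = usb4_port_enumerate_retimers(port);
+	if (ret)
+		return ret;
+
+	return usb4_port_retimer_nvm_authenticate_status(port, index, &status);
+#endif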
+
+static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
+					    void *buf, size_t dwords)
+{
+	const struct retimer_info *info = data;
+	struct tb_port *port = info->port;
+	u8 index = info->index;
+	u32 metadata;
+	int ret;
+
+	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
+	if (dwords < USB4_DATA_DWORDS)
+		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
+
+	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
+				      sizeof(metadata));
+	if (ret)
+		return ret;
+
+	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
+	if (ret)
+		return ret;
+
+	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
+				      dwords * 4);
+}
+
+/**
+ * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
+ * @port: USB4 port
+ * @index: Retimer index
+ * @address: NVM address (in bytes) to start reading
+ * @buf: Data read from NVM is stored here
+ * @size: Number of bytes to read
+ *
+ * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
+ * read was successful and negative errno in case of failure.
+ * Specifically returns %-ENODEV if there is no retimer at @index.
+ */
+int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+			       unsigned int address, void *buf, size_t size)
+{
+	struct retimer_info info = { .port = port, .index = index };
+
+	return usb4_do_read_data(address, buf, size,
+			usb4_port_retimer_nvm_read_block, &info);
+}
+
+/**
+ * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
+ * @port: USB3 adapter port
+ *
+ * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
+ * or negative errno in case of error.
+ */
+int usb4_usb3_port_max_link_rate(struct tb_port *port)
+{
+	int ret, lr;
+	u32 val;
+
+	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_4, 1);
+	if (ret)
+		return ret;
+
+	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
+	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+}
+
+/**
+ * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
+ * @port: USB3 adapter port
+ *
+ * Returns the actual established link rate of a USB3 adapter in Mb/s.
+ * If the link is not up, returns %0; negative errno is returned in case
+ * of failure.
+ */
+int usb4_usb3_port_actual_link_rate(struct tb_port *port)
+{
+	int ret, lr;
+	u32 val;
+
+	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_4, 1);
+	if (ret)
+		return ret;
+
+	if (!(val & ADP_USB3_CS_4_ULV))
+		return 0;
+
+	lr = val & ADP_USB3_CS_4_ALR_MASK;
+	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+}
+
+static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
+{
+	int ret;
+	u32 val;
+
+	if (!tb_port_is_usb3_down(port))
+		return -EINVAL;
+	if (tb_route(port->sw))
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_2, 1);
+	if (ret)
+		return ret;
+
+	if (request)
+		val |= ADP_USB3_CS_2_CMR;
+	else
+		val &= ~ADP_USB3_CS_2_CMR;
+
+	ret = tb_port_write(port, &val, TB_CFG_PORT,
+			    port->cap_adap + ADP_USB3_CS_2, 1);
+	if (ret)
+		return ret;
+
+	/*
+	 * We can use val here directly as the CMR bit is in the same place
+	 * as HCA. Just mask out the others.
+	 */
+	val &= ADP_USB3_CS_2_CMR;
+	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
+				      ADP_USB3_CS_1_HCA, val, 1500);
+}
+
+static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
+{
+	return usb4_usb3_port_cm_request(port, true);
+}
+
+static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
+{
+	return usb4_usb3_port_cm_request(port, false);
+}
+
+static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
+{
+	unsigned long uframes;
+
+	uframes = bw * 512UL << scale;
+	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
+}
+
+static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
+{
+	unsigned long uframes;
+
+	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
+	uframes = ((unsigned long)mbps * 1000 *  1000) / 8000;
+	return DIV_ROUND_UP(uframes, 512UL << scale);
+}
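+
+/*
+ * Worked example of the conversions above (illustrative): with scale 0,
+ * mbps_to_usb3_bw(900, 0) = DIV_ROUND_UP(900 * 1000 * 1000 / 8000, 512)
+ * = DIV_ROUND_UP(112500, 512) = 220, and converting back,
+ * usb3_bw_to_mbps(220, 0) = DIV_ROUND_CLOSEST(220 * 512 * 8000, 10^6)
+ * = 901, i.e. the register granularity rounds the request slightly up.
+ */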
+
+static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
+						   int *upstream_bw,
+						   int *downstream_bw)
+{
+	u32 val, bw, scale;
+	int ret;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_2, 1);
+	if (ret)
+		return ret;
+
+	ret = tb_port_read(port, &scale, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_3, 1);
+	if (ret)
+		return ret;
+
+	scale &= ADP_USB3_CS_3_SCALE_MASK;
+
+	bw = val & ADP_USB3_CS_2_AUBW_MASK;
+	*upstream_bw = usb3_bw_to_mbps(bw, scale);
+
+	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
+	*downstream_bw = usb3_bw_to_mbps(bw, scale);
+
+	return 0;
+}
+
+/**
+ * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
+ * @port: USB3 adapter port
+ * @upstream_bw: Allocated upstream bandwidth is stored here
+ * @downstream_bw: Allocated downstream bandwidth is stored here
+ *
+ * Stores currently allocated USB3 bandwidth into @upstream_bw and
+ * @downstream_bw in Mb/s. Returns %0 in case of success and negative
+ * errno in case of failure.
+ */
+int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
+				       int *downstream_bw)
+{
+	int ret;
+
+	ret = usb4_usb3_port_set_cm_request(port);
+	if (ret)
+		return ret;
+
+	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
+						      downstream_bw);
+	usb4_usb3_port_clear_cm_request(port);
+
+	return ret;
+}
+
+static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
+						  int *upstream_bw,
+						  int *downstream_bw)
+{
+	u32 val, bw, scale;
+	int ret;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_1, 1);
+	if (ret)
+		return ret;
+
+	ret = tb_port_read(port, &scale, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_3, 1);
+	if (ret)
+		return ret;
+
+	scale &= ADP_USB3_CS_3_SCALE_MASK;
+
+	bw = val & ADP_USB3_CS_1_CUBW_MASK;
+	*upstream_bw = usb3_bw_to_mbps(bw, scale);
+
+	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
+	*downstream_bw = usb3_bw_to_mbps(bw, scale);
+
+	return 0;
+}
+
+static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
+						    int upstream_bw,
+						    int downstream_bw)
+{
+	u32 val, ubw, dbw, scale;
+	int ret;
+
+	/* Read the used scale, hardware default is 0 */
+	ret = tb_port_read(port, &scale, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_3, 1);
+	if (ret)
+		return ret;
+
+	scale &= ADP_USB3_CS_3_SCALE_MASK;
+	ubw = mbps_to_usb3_bw(upstream_bw, scale);
+	dbw = mbps_to_usb3_bw(downstream_bw, scale);
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_adap + ADP_USB3_CS_2, 1);
+	if (ret)
+		return ret;
+
+	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
+	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
+	val |= ubw;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_adap + ADP_USB3_CS_2, 1);
+}
+
+/**
+ * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
+ * @port: USB3 adapter port
+ * @upstream_bw: New upstream bandwidth
+ * @downstream_bw: New downstream bandwidth
+ *
+ * This can be used to set how much bandwidth is allocated for the USB3
+ * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
+ * new values programmed to the USB3 adapter allocation registers. If
+ * the values are lower than what is currently consumed the allocation
+ * is set to what is currently consumed instead (consumed bandwidth
+ * cannot be taken away by the CM). The actual new values are returned in
+ * @upstream_bw and @downstream_bw.
+ *
+ * Returns %0 in case of success and negative errno if there was a
+ * failure.
+ */
+int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
+				      int *downstream_bw)
+{
+	int ret, consumed_up, consumed_down, allocate_up, allocate_down;
+
+	ret = usb4_usb3_port_set_cm_request(port);
+	if (ret)
+		return ret;
+
+	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
+						     &consumed_down);
+	if (ret)
+		goto err_request;
+
+	/* Don't allow it to go lower than what is consumed */
+	allocate_up = max(*upstream_bw, consumed_up);
+	allocate_down = max(*downstream_bw, consumed_down);
+
+	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
+						       allocate_down);
+	if (ret)
+		goto err_request;
+
+	*upstream_bw = allocate_up;
+	*downstream_bw = allocate_down;
+
+err_request:
+	usb4_usb3_port_clear_cm_request(port);
+	return ret;
+}
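
/*
 * Editor's sketch (illustrative, not part of the patch): the in/out
 * parameter convention above means a request can come back raised.
 * A hypothetical caller asks for 2000 Mb/s each way and then honours
 * whatever was actually programmed:
 */
static int example_grow_usb3_allocation(struct tb_port *port)
{
	int up = 2000, down = 2000;
	int ret;

	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	/* up/down now hold the programmed values (never below consumed) */
	tb_port_dbg(port, "allocation now %d/%d Mb/s\n", up, down);
	return 0;
}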
+
+/**
+ * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
+ * @port: USB3 adapter port
+ * @upstream_bw: New allocated upstream bandwidth
+ * @downstream_bw: New allocated downstream bandwidth
+ *
+ * Releases USB3 allocated bandwidth down to what is actually consumed.
+ * The new bandwidth is returned in @upstream_bw and @downstream_bw.
+ *
+ * Returns %0 in case of success and negative errno in case of failure.
+ */
+int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+				     int *downstream_bw)
+{
+	int ret, consumed_up, consumed_down;
+
+	ret = usb4_usb3_port_set_cm_request(port);
+	if (ret)
+		return ret;
+
+	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
+						     &consumed_down);
+	if (ret)
+		goto err_request;
+
+	/*
+	 * Always keep 1000 Mb/s to make sure xHCI has at least some
+	 * bandwidth available for isochronous traffic.
+	 */
+	if (consumed_up < 1000)
+		consumed_up = 1000;
+	if (consumed_down < 1000)
+		consumed_down = 1000;
+
+	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
+						       consumed_down);
+	if (ret)
+		goto err_request;
+
+	*upstream_bw = consumed_up;
+	*downstream_bw = consumed_down;
+
+err_request:
+	usb4_usb3_port_clear_cm_request(port);
+	return ret;
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9e9bf87..c00ad81 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -501,6 +501,55 @@
 }
 EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
 
+static int rebuild_property_block(void)
+{
+	u32 *block, len;
+	int ret;
+
+	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	len = ret;
+
+	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	ret = tb_property_format_dir(xdomain_property_dir, block, len);
+	if (ret) {
+		kfree(block);
+		return ret;
+	}
+
+	kfree(xdomain_property_block);
+	xdomain_property_block = block;
+	xdomain_property_block_len = len;
+	xdomain_property_block_gen++;
+
+	return 0;
+}
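
/*
 * Editor's sketch (illustrative, not part of the patch):
 * rebuild_property_block() is the usual two-pass size-then-fill idiom.
 * The same shape in standalone form; format_dir() here is a
 * hypothetical stand-in for tb_property_format_dir(), returning the
 * needed length in dwords when the buffer is NULL and 0 on a
 * successful fill.
 */
#include <stdint.h>
#include <stdlib.h>

int format_dir(const void *dir, uint32_t *buf, size_t len);

static uint32_t *build_block(const void *dir, size_t *out_len)
{
	uint32_t *block;
	int len;

	len = format_dir(dir, NULL, 0);		/* pass 1: size query */
	if (len < 0)
		return NULL;

	block = calloc(len, sizeof(*block));
	if (!block)
		return NULL;

	if (format_dir(dir, block, len)) {	/* pass 2: fill */
		free(block);
		return NULL;
	}

	*out_len = len;
	return block;
}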
+
+static void finalize_property_block(void)
+{
+	const struct tb_property *nodename;
+
+	/*
+	 * On first XDomain connection we set up the system nodename.
+	 * This is delayed here because userspace may not have it set
+	 * when the driver is first probed.
+	 */
+	mutex_lock(&xdomain_lock);
+	nodename = tb_property_find(xdomain_property_dir, "deviceid",
+				    TB_PROPERTY_TYPE_TEXT);
+	if (!nodename) {
+		tb_property_add_text(xdomain_property_dir, "deviceid",
+				     utsname()->nodename);
+		rebuild_property_block();
+	}
+	mutex_unlock(&xdomain_lock);
+}
+
 static void tb_xdp_handle_request(struct work_struct *work)
 {
 	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -529,6 +578,8 @@
 		goto out;
 	}
 
+	finalize_property_block();
+
 	switch (pkg->type) {
 	case PROPERTIES_REQUEST:
 		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
@@ -1221,7 +1272,13 @@
 				    u64 route, const uuid_t *local_uuid,
 				    const uuid_t *remote_uuid)
 {
+	struct tb_switch *parent_sw = tb_to_switch(parent);
 	struct tb_xdomain *xd;
+	struct tb_port *down;
+
+	/* Make sure the downstream domain is accessible */
+	down = tb_port_at(route, parent_sw);
+	tb_port_unlock(down);
 
 	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
 	if (!xd)
@@ -1405,10 +1462,9 @@
 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
 	const struct tb_xdomain_lookup *lookup)
 {
-	int i;
+	struct tb_port *port;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		struct tb_port *port = &sw->ports[i];
+	tb_switch_for_each_port(sw, port) {
 		struct tb_xdomain *xd;
 
 		if (port->xdomain) {
@@ -1565,35 +1621,6 @@
 	return ret > 0;
 }
 
-static int rebuild_property_block(void)
-{
-	u32 *block, len;
-	int ret;
-
-	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
-	if (ret < 0)
-		return ret;
-
-	len = ret;
-
-	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
-	if (!block)
-		return -ENOMEM;
-
-	ret = tb_property_format_dir(xdomain_property_dir, block, len);
-	if (ret) {
-		kfree(block);
-		return ret;
-	}
-
-	kfree(xdomain_property_block);
-	xdomain_property_block = block;
-	xdomain_property_block_len = len;
-	xdomain_property_block_gen++;
-
-	return 0;
-}
-
 static int update_xdomain(struct device *dev, void *data)
 {
 	struct tb_xdomain *xd;
@@ -1698,8 +1725,6 @@
 
 int tb_xdomain_init(void)
 {
-	int ret;
-
 	xdomain_property_dir = tb_property_create_dir(NULL);
 	if (!xdomain_property_dir)
 		return -ENOMEM;
@@ -1708,22 +1733,16 @@
 	 * Initialize standard set of properties without any service
 	 * directories. Those will be added by service drivers
 	 * themselves when they are loaded.
+	 *
+	 * We also add the node name later, when the first connection is made.
 	 */
 	tb_property_add_immediate(xdomain_property_dir, "vendorid",
 				  PCI_VENDOR_ID_INTEL);
 	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
 	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
-	tb_property_add_text(xdomain_property_dir, "deviceid",
-			     utsname()->nodename);
 	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
 
-	ret = rebuild_property_block();
-	if (ret) {
-		tb_property_free_dir(xdomain_property_dir);
-		xdomain_property_dir = NULL;
-	}
-
-	return ret;
+	return 0;
 }
 
 void tb_xdomain_exit(void)