Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index f4869c3..fd9adca 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menuconfig THUNDERBOLT
 	tristate "Thunderbolt support"
 	depends on PCI
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index f2f0de2..001187c 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
+thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index c2277b8..8bf8e03 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - capabilities lookup
+ * Thunderbolt driver - capabilities lookup
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/slab.h>
@@ -12,6 +13,7 @@
 
 #define CAP_OFFSET_MAX		0xff
 #define VSE_CAP_OFFSET_MAX	0xffff
+#define TMU_ACCESS_EN		BIT(20)
 
 struct tb_cap_any {
 	union {
@@ -21,28 +23,53 @@
 	};
 } __packed;
 
-/**
- * tb_port_find_cap() - Find port capability
- * @port: Port to find the capability for
- * @cap: Capability to look
- *
- * Returns offset to start of capability or %-ENOENT if no such
- * capability was found. Negative errno is returned if there was an
- * error.
- */
-int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+static int tb_port_enable_tmu(struct tb_port *port, bool enable)
 {
-	u32 offset;
+	struct tb_switch *sw = port->sw;
+	u32 value, offset;
+	int ret;
 
 	/*
-	 * DP out adapters claim to implement TMU capability but in
-	 * reality they do not so we hard code the adapter specific
-	 * capability offset here.
+	 * Legacy devices need to have TMU access enabled before port
+	 * space can be fully accessed.
 	 */
-	if (port->config.type == TB_TYPE_DP_HDMI_OUT)
-		offset = 0x39;
+	if (tb_switch_is_lr(sw))
+		offset = 0x26;
+	else if (tb_switch_is_er(sw))
+		offset = 0x2a;
 	else
-		offset = 0x1;
+		return 0;
+
+	ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	if (enable)
+		value |= TMU_ACCESS_EN;
+	else
+		value &= ~TMU_ACCESS_EN;
+
+	return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+}
+
+static void tb_port_dummy_read(struct tb_port *port)
+{
+	/*
+	 * When reading from the next capability pointer location in port
+	 * config space, the read data is not cleared on LR. To avoid
+	 * returning stale data on the next read, perform one dummy read
+	 * after the port capabilities have been walked.
+	 */
+	if (tb_switch_is_lr(port->sw)) {
+		u32 dummy;
+
+		tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
+	}
+}
+
+static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+	u32 offset = 1;
 
 	do {
 		struct tb_cap_any header;
@@ -61,6 +88,31 @@
 	return -ENOENT;
 }
 
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+	int ret;
+
+	ret = tb_port_enable_tmu(port, true);
+	if (ret)
+		return ret;
+
+	ret = __tb_port_find_cap(port, cap);
+
+	tb_port_dummy_read(port);
+	tb_port_enable_tmu(port, false);
+
+	return ret;
+}
+
 static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
 {
 	int offset = sw->config.first_cap_offset;
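
With TMU access handled inside tb_port_find_cap() itself, callers keep the
old contract: a non-negative offset to the start of the capability on
success, -ENOENT when the port does not implement it, or another negative
errno on transport errors. A minimal caller sketch (TB_PORT_CAP_PHY comes
from the driver's tb_port_cap enum; reading the first dword of the
capability is illustrative):

	int cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
	u32 val;

	if (cap < 0)
		return cap;	/* -ENOENT or a transport error */

	/* the capability starts at 'cap' in port config space */
	return tb_port_read(port, &val, TB_CFG_PORT, cap, 1);
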
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 37a7f4c..2ec1af8 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/crc32.h>
@@ -631,7 +632,7 @@
 		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
 	}
 
-	tb_ctl_info(ctl, "control channel created\n");
+	tb_ctl_dbg(ctl, "control channel created\n");
 	return ctl;
 err:
 	tb_ctl_free(ctl);
@@ -662,8 +663,7 @@
 		tb_ctl_pkg_free(ctl->rx_packets[i]);
 
 
-	if (ctl->frame_pool)
-		dma_pool_destroy(ctl->frame_pool);
+	dma_pool_destroy(ctl->frame_pool);
 	kfree(ctl);
 }
 
@@ -673,7 +673,7 @@
 void tb_ctl_start(struct tb_ctl *ctl)
 {
 	int i;
-	tb_ctl_info(ctl, "control channel starting...\n");
+	tb_ctl_dbg(ctl, "control channel starting...\n");
 	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
 	tb_ring_start(ctl->rx);
 	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -702,7 +702,7 @@
 	if (!list_empty(&ctl->request_queue))
 		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
 	INIT_LIST_HEAD(&ctl->request_queue);
-	tb_ctl_info(ctl, "control channel stopped\n");
+	tb_ctl_dbg(ctl, "control channel stopped\n");
 }
 
 /* public interface, commands */
@@ -720,7 +720,7 @@
 		.port = port,
 		.error = error,
 	};
-	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
+	tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
 	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
 }
 
@@ -930,6 +930,23 @@
 	return res;
 }
 
+static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
+			    const struct tb_cfg_result *res)
+{
+	/*
+	 * For unimplemented ports access to port config space may return
+	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
+	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
+	 * that the caller can mark the port as disabled.
+	 */
+	if (space == TB_CFG_PORT &&
+	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
+		return -ENODEV;
+
+	tb_cfg_print_error(ctl, res);
+	return -EIO;
+}
+
 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 		enum tb_cfg_space space, u32 offset, u32 length)
 {
@@ -942,8 +959,7 @@
 
 	case 1:
 		/* Thunderbolt error, tb_error holds the actual number */
-		tb_cfg_print_error(ctl, &res);
-		return -EIO;
+		return tb_cfg_get_error(ctl, space, &res);
 
 	case -ETIMEDOUT:
 		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
@@ -969,8 +985,7 @@
 
 	case 1:
 		/* Thunderbolt error, tb_error holds the actual number */
-		tb_cfg_print_error(ctl, &res);
-		return -EIO;
+		return tb_cfg_get_error(ctl, space, &res);
 
 	case -ETIMEDOUT:
 		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
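
The new tb_cfg_get_error() gives port scanning a way to tell "port not
implemented" apart from real failures. A sketch of how a caller is expected
to consume the -ENODEV result, per the comment above (the disabled flag
handling is illustrative):

	ret = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (ret == -ENODEV) {
		/* unimplemented port: record it and carry on */
		port->disabled = true;
		return 0;
	}
	if (ret)
		return ret;
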
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 3062e0b..2f1a1e1 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #ifndef _TB_CFG
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index f270119..847dd07 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Thunderbolt DMA configuration based mailbox support
  *
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/delay.h>
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
index c4a69e0..7deadd9 100644
--- a/drivers/thunderbolt/dma_port.h
+++ b/drivers/thunderbolt/dma_port.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Thunderbolt DMA configuration based mailbox support
  *
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef DMA_PORT_H_
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 092381e..b7980c8 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -1,16 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Thunderbolt bus support
  *
  * Copyright (C) 2017, Intel Corporation
- * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
 #include <linux/device.h>
+#include <linux/dmar.h>
 #include <linux/idr.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -239,6 +238,20 @@
 }
 static DEVICE_ATTR_RW(boot_acl);
 
+static ssize_t iommu_dma_protection_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	/*
+	 * Kernel DMA protection is a feature where Thunderbolt security is
+	 * handled natively using IOMMU. It is enabled when IOMMU is
+	 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
+	 */
+	return sprintf(buf, "%d\n",
+		       iommu_present(&pci_bus_type) && dmar_platform_optin());
+}
+static DEVICE_ATTR_RO(iommu_dma_protection);
+
 static ssize_t security_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
@@ -254,6 +267,7 @@
 
 static struct attribute *domain_attrs[] = {
 	&dev_attr_boot_acl.attr,
+	&dev_attr_iommu_dma_protection.attr,
 	&dev_attr_security.attr,
 	NULL,
 };
@@ -664,7 +678,6 @@
 	}
 
 	shash->tfm = tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	memset(hmac, 0, sizeof(hmac));
 	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
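
The attribute shows up as
/sys/bus/thunderbolt/devices/<domain>/iommu_dma_protection and reads back
"0" or "1". A small userspace sketch in C (domain0 assumed to be the first
domain):

	#include <stdio.h>

	int main(void)
	{
		char buf[4] = "";
		FILE *f = fopen("/sys/bus/thunderbolt/devices/domain0"
				"/iommu_dma_protection", "r");

		if (!f)
			return 1;
		fgets(buf, sizeof(buf), f);
		fclose(f);
		printf("kernel DMA protection: %s", buf); /* "0\n" or "1\n" */
		return 0;
	}
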
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 3e8caf2..ee51964 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - eeprom access
+ * Thunderbolt driver - eeprom access
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/crc32.h>
@@ -413,7 +414,7 @@
 	struct device *dev = &sw->tb->nhi->pdev->dev;
 	int len, res;
 
-	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+	len = device_property_count_u8(dev, "ThunderboltDROM");
 	if (len < 0 || len < sizeof(struct tb_drom_header))
 		return -EINVAL;
 
@@ -524,10 +525,6 @@
 		sw->ports[3].dual_link_port = &sw->ports[4];
 		sw->ports[4].dual_link_port = &sw->ports[3];
 
-		/* Port 5 is inaccessible on this gen 1 controller */
-		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
-			sw->ports[5].disabled = true;
-
 		return 0;
 	}
 
@@ -540,7 +537,7 @@
 		return res;
 	size &= 0x3ff;
 	size += TB_DROM_DATA_START;
-	tb_sw_info(sw, "reading drom (length: %#x)\n", size);
+	tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
 	if (size < sizeof(*header)) {
 		tb_sw_warn(sw, "drom too small, aborting\n");
 		return -EIO;
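
device_property_count_u8() is shorthand for the old NULL/0 idiom; both
forms return the number of u8 elements in the property (or a negative
errno), so the length check stays the same. Equivalence sketch:

	/* these two calls return the same value */
	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
	len = device_property_count_u8(dev, "ThunderboltDROM");
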
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 28fc4ce..245588f 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Internal Thunderbolt Connection Manager. This is a firmware running on
  * the Thunderbolt host controller performing most of the low-level
@@ -6,10 +7,6 @@
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/delay.h>
@@ -45,7 +42,6 @@
 #define ICM_TIMEOUT			5000	/* ms */
 #define ICM_APPROVE_TIMEOUT		10000	/* ms */
 #define ICM_MAX_LINK			4
-#define ICM_MAX_DEPTH			6
 
 /**
  * struct icm - Internal connection manager private data
@@ -59,15 +55,20 @@
  * @safe_mode: ICM is in safe mode
  * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
  * @rpm: Does the controller support runtime PM (RTD3)
+ * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
+ * @veto: Is RTD3 veto in effect
  * @is_supported: Checks if we can support ICM on this controller
+ * @cio_reset: Trigger CIO reset
  * @get_mode: Read and return the ICM firmware mode (optional)
  * @get_route: Find a route string for given switch
  * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
  * @driver_ready: Send driver ready message to ICM
+ * @set_uuid: Set UUID for the root switch (optional)
  * @device_connected: Handle device connected ICM message
  * @device_disconnected: Handle device disconnected ICM message
  * @xdomain_connected - Handle XDomain connected ICM message
  * @xdomain_disconnected - Handle XDomain disconnected ICM message
+ * @rtd3_veto: Handle RTD3 veto notification ICM message
  */
 struct icm {
 	struct mutex request_lock;
@@ -77,13 +78,17 @@
 	int vnd_cap;
 	bool safe_mode;
 	bool rpm;
+	bool can_upgrade_nvm;
+	bool veto;
 	bool (*is_supported)(struct tb *tb);
+	int (*cio_reset)(struct tb *tb);
 	int (*get_mode)(struct tb *tb);
 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
 	void (*save_devices)(struct tb *tb);
 	int (*driver_ready)(struct tb *tb,
 			    enum tb_security_level *security_level,
 			    size_t *nboot_acl, bool *rpm);
+	void (*set_uuid)(struct tb *tb);
 	void (*device_connected)(struct tb *tb,
 				 const struct icm_pkg_header *hdr);
 	void (*device_disconnected)(struct tb *tb,
@@ -92,6 +97,7 @@
 				  const struct icm_pkg_header *hdr);
 	void (*xdomain_disconnected)(struct tb *tb,
 				     const struct icm_pkg_header *hdr);
+	void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
 };
 
 struct icm_notification {
@@ -170,6 +176,65 @@
 	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
 }
 
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+	u32 cmd;
+
+	do {
+		pci_read_config_dword(icm->upstream_port,
+				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+		if (!(cmd & PCIE2CIO_CMD_START)) {
+			if (cmd & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return 0;
+		}
+
+		msleep(50);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+			 unsigned int port, unsigned int index, u32 *data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int ret, vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	ret = pci2cio_wait_completion(icm, 5000);
+	if (ret)
+		return ret;
+
+	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+	return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+			  unsigned int port, unsigned int index, u32 data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	return pci2cio_wait_completion(icm, 5000);
+}
+
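
These two helpers move earlier in the file unchanged so that the new
per-controller cio_reset hooks can use them. The PCIe2CIO window tunnels a
single CIO config-space access through vendor-specific PCI config registers
of the upstream port; for example, the Alpine Ridge reset added below is
one such write (switch config space, port 0, dword 0x50, bit 9):

	/* equivalent to icm_ar_cio_reset() below */
	ret = pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
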
 static bool icm_match(const struct tb_cfg_request *req,
 		      const struct ctl_pkg *pkg)
 {
@@ -237,6 +302,43 @@
 	return -ETIMEDOUT;
 }
 
+/*
+ * If rescan is queued to run (we are resuming), postpone it to give the
+ * firmware some more time to send device connected notifications for next
+ * devices in the chain.
+ */
+static void icm_postpone_rescan(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (delayed_work_pending(&icm->rescan_work))
+		mod_delayed_work(tb->wq, &icm->rescan_work,
+				 msecs_to_jiffies(500));
+}
+
+static void icm_veto_begin(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (!icm->veto) {
+		icm->veto = true;
+		/* Keep the domain powered while veto is in effect */
+		pm_runtime_get(&tb->dev);
+	}
+}
+
+static void icm_veto_end(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (icm->veto) {
+		icm->veto = false;
+		/* Allow the domain suspend now */
+		pm_runtime_mark_last_busy(&tb->dev);
+		pm_runtime_put_autosuspend(&tb->dev);
+	}
+}
+
 static bool icm_fr_is_supported(struct tb *tb)
 {
 	return !x86_apple_machine;
@@ -460,22 +562,29 @@
 	return 0;
 }
 
-static void add_switch(struct tb_switch *parent_sw, u64 route,
-		       const uuid_t *uuid, const u8 *ep_name,
-		       size_t ep_name_size, u8 connection_id, u8 connection_key,
-		       u8 link, u8 depth, enum tb_security_level security_level,
-		       bool authorized, bool boot)
+static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
+				    const uuid_t *uuid, const u8 *ep_name,
+				    size_t ep_name_size, u8 connection_id,
+				    u8 connection_key, u8 link, u8 depth,
+				    enum tb_security_level security_level,
+				    bool authorized, bool boot)
 {
 	const struct intel_vss *vss;
 	struct tb_switch *sw;
+	int ret;
 
 	pm_runtime_get_sync(&parent_sw->dev);
 
 	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
-	if (!sw)
+	if (IS_ERR(sw))
 		goto out;
 
 	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+	if (!sw->uuid) {
+		tb_sw_warn(sw, "cannot allocate memory for switch\n");
+		tb_switch_put(sw);
+		goto out;
+	}
 	sw->connection_id = connection_id;
 	sw->connection_key = connection_key;
 	sw->link = link;
@@ -483,6 +592,7 @@
 	sw->authorized = authorized;
 	sw->security_level = security_level;
 	sw->boot = boot;
+	init_completion(&sw->rpm_complete);
 
 	vss = parse_intel_vss(ep_name, ep_name_size);
 	if (vss)
@@ -492,14 +602,18 @@
 	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
 	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
 
-	if (tb_switch_add(sw)) {
+	ret = tb_switch_add(sw);
+	if (ret) {
 		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
 		tb_switch_put(sw);
+		sw = ERR_PTR(ret);
 	}
 
 out:
 	pm_runtime_mark_last_busy(&parent_sw->dev);
 	pm_runtime_put_autosuspend(&parent_sw->dev);
+
+	return sw;
 }
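
tb_switch_alloc() now returns ERR_PTR() codes instead of NULL, so callers
switch from NULL checks to IS_ERR()/PTR_ERR(), as add_switch() above and
icm_start() below do. The pattern, as a sketch:

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);	/* was: if (!sw) return -ENODEV; */
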
 
 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -522,6 +636,9 @@
 
 	/* This switch still exists */
 	sw->is_unplugged = false;
+
+	/* Runtime resume is now complete */
+	complete(&sw->rpm_complete);
 }
 
 static void remove_switch(struct tb_switch *sw)
@@ -588,6 +705,8 @@
 	u64 route;
 	int ret;
 
+	icm_postpone_rescan(tb);
+
 	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 		ICM_LINK_INFO_DEPTH_SHIFT;
@@ -712,7 +831,7 @@
 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 		ICM_LINK_INFO_DEPTH_SHIFT;
 
-	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
 		return;
 	}
@@ -742,7 +861,7 @@
 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 		ICM_LINK_INFO_DEPTH_SHIFT;
 
-	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
 		return;
 	}
@@ -796,9 +915,11 @@
 	 * connected another host to the same port, remove the switch
 	 * first.
 	 */
-	sw = get_switch_at_route(tb->root_switch, route);
-	if (sw)
+	sw = tb_switch_find_by_route(tb, route);
+	if (sw) {
 		remove_switch(sw);
+		tb_switch_put(sw);
+	}
 
 	sw = tb_switch_find_by_link_depth(tb, link, depth);
 	if (!sw) {
@@ -831,6 +952,11 @@
 	}
 }
 
+static int icm_tr_cio_reset(struct tb *tb)
+{
+	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
+}
+
 static int
 icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 		    size_t *nboot_acl, bool *rpm)
@@ -1011,7 +1137,8 @@
 }
 
 static void
-icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
+			  bool force_rtd3)
 {
 	const struct icm_tr_event_device_connected *pkg =
 		(const struct icm_tr_event_device_connected *)hdr;
@@ -1021,6 +1148,8 @@
 	bool authorized, boot;
 	u64 route;
 
+	icm_postpone_rescan(tb);
+
 	/*
 	 * Currently we don't use the QoS information coming with the
 	 * device connected message so simply just ignore that extra
@@ -1076,14 +1205,22 @@
 		return;
 	}
 
-	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
-		   sizeof(pkg->ep_name), pkg->connection_id,
-		   0, 0, 0, security_level, authorized, boot);
+	sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+			sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
+			security_level, authorized, boot);
+	if (!IS_ERR(sw) && force_rtd3)
+		sw->rpm = true;
 
 	tb_switch_put(parent_sw);
 }
 
 static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	__icm_tr_device_connected(tb, hdr, false);
+}
+
+static void
 icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 {
 	const struct icm_tr_event_device_disconnected *pkg =
@@ -1141,9 +1278,11 @@
 	 * connected another host to the same port, remove the switch
 	 * first.
 	 */
-	sw = get_switch_at_route(tb->root_switch, route);
-	if (sw)
+	sw = tb_switch_find_by_route(tb, route);
+	if (sw) {
 		remove_switch(sw);
+		tb_switch_put(sw);
+	}
 
 	sw = tb_switch_find_by_route(tb, get_parent_route(route));
 	if (!sw) {
@@ -1194,6 +1333,8 @@
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
 		return parent;
 	}
 
@@ -1233,6 +1374,11 @@
 	return false;
 }
 
+static int icm_ar_cio_reset(struct tb *tb)
+{
+	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
 static int icm_ar_get_mode(struct tb *tb)
 {
 	struct tb_nhi *nhi = tb->nhi;
@@ -1384,6 +1530,61 @@
 	return 0;
 }
 
+static int
+icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl, bool *rpm)
+{
+	struct icm_tr_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, 20000);
+	if (ret)
+		return ret;
+
+	/* Ice Lake always supports RTD3 */
+	if (rpm)
+		*rpm = true;
+
+	return 0;
+}
+
+static void icm_icl_set_uuid(struct tb *tb)
+{
+	struct tb_nhi *nhi = tb->nhi;
+	u32 uuid[4];
+
+	pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
+	pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
+	uuid[2] = 0xffffffff;
+	uuid[3] = 0xffffffff;
+
+	tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static void
+icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	__icm_tr_device_connected(tb, hdr, true);
+}
+
+static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_icl_event_rtd3_veto *pkg =
+		(const struct icm_icl_event_rtd3_veto *)hdr;
+
+	tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
+
+	if (pkg->veto_reason)
+		icm_veto_begin(tb);
+	else
+		icm_veto_end(tb);
+}
+
 static void icm_handle_notification(struct work_struct *work)
 {
 	struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -1411,6 +1612,9 @@
 		case ICM_EVENT_XDOMAIN_DISCONNECTED:
 			icm->xdomain_disconnected(tb, n->pkg);
 			break;
+		case ICM_EVENT_RTD3_VETO:
+			icm->rtd3_veto(tb, n->pkg);
+			break;
 		}
 	}
 
@@ -1470,65 +1674,6 @@
 	return -ETIMEDOUT;
 }
 
-static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
-{
-	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
-	u32 cmd;
-
-	do {
-		pci_read_config_dword(icm->upstream_port,
-				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
-		if (!(cmd & PCIE2CIO_CMD_START)) {
-			if (cmd & PCIE2CIO_CMD_TIMEOUT)
-				break;
-			return 0;
-		}
-
-		msleep(50);
-	} while (time_before(jiffies, end));
-
-	return -ETIMEDOUT;
-}
-
-static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
-			 unsigned int port, unsigned int index, u32 *data)
-{
-	struct pci_dev *pdev = icm->upstream_port;
-	int ret, vnd_cap = icm->vnd_cap;
-	u32 cmd;
-
-	cmd = index;
-	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
-	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
-	cmd |= PCIE2CIO_CMD_START;
-	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
-
-	ret = pci2cio_wait_completion(icm, 5000);
-	if (ret)
-		return ret;
-
-	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
-	return 0;
-}
-
-static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
-			  unsigned int port, unsigned int index, u32 data)
-{
-	struct pci_dev *pdev = icm->upstream_port;
-	int vnd_cap = icm->vnd_cap;
-	u32 cmd;
-
-	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
-
-	cmd = index;
-	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
-	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
-	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
-	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
-
-	return pci2cio_wait_completion(icm, 5000);
-}
-
 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
 {
 	struct icm *icm = tb_priv(tb);
@@ -1549,7 +1694,7 @@
 	iowrite32(val, nhi->iobase + REG_FW_STS);
 
 	/* Trigger CIO reset now */
-	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
+	return icm->cio_reset(tb);
 }
 
 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
@@ -1563,7 +1708,7 @@
 	if (val & REG_FW_STS_ICM_EN)
 		return 0;
 
-	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
 
 	ret = icm_firmware_reset(tb, nhi);
 	if (ret)
@@ -1756,19 +1901,39 @@
 	for (i = 1; i <= sw->config.max_port_number; i++) {
 		struct tb_port *port = &sw->ports[i];
 
-		if (tb_is_upstream_port(port))
-			continue;
-		if (port->xdomain) {
+		if (port->xdomain)
 			port->xdomain->is_unplugged = true;
-			continue;
-		}
-		if (!port->remote)
-			continue;
-
-		icm_unplug_children(port->remote->sw);
+		else if (tb_port_has_remote(port))
+			icm_unplug_children(port->remote->sw);
 	}
 }
 
+static int complete_rpm(struct device *dev, void *data)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	if (sw)
+		complete(&sw->rpm_complete);
+	return 0;
+}
+
+static void remove_unplugged_switch(struct tb_switch *sw)
+{
+	pm_runtime_get_sync(sw->dev.parent);
+
+	/*
+	 * Signal this and switches below for rpm_complete because
+	 * tb_switch_remove() calls pm_runtime_get_sync() that then waits
+	 * for it.
+	 */
+	complete_rpm(&sw->dev, NULL);
+	bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
+	tb_switch_remove(sw);
+
+	pm_runtime_mark_last_busy(sw->dev.parent);
+	pm_runtime_put_autosuspend(sw->dev.parent);
+}
+
 static void icm_free_unplugged_children(struct tb_switch *sw)
 {
 	unsigned int i;
@@ -1776,23 +1941,16 @@
 	for (i = 1; i <= sw->config.max_port_number; i++) {
 		struct tb_port *port = &sw->ports[i];
 
-		if (tb_is_upstream_port(port))
-			continue;
-
 		if (port->xdomain && port->xdomain->is_unplugged) {
 			tb_xdomain_remove(port->xdomain);
 			port->xdomain = NULL;
-			continue;
-		}
-
-		if (!port->remote)
-			continue;
-
-		if (port->remote->sw->is_unplugged) {
-			tb_switch_remove(port->remote->sw);
-			port->remote = NULL;
-		} else {
-			icm_free_unplugged_children(port->remote->sw);
+		} else if (tb_port_has_remote(port)) {
+			if (port->remote->sw->is_unplugged) {
+				remove_unplugged_switch(port->remote->sw);
+				port->remote = NULL;
+			} else {
+				icm_free_unplugged_children(port->remote->sw);
+			}
 		}
 	}
 }
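
remove_unplugged_switch() above exists because tb_switch_remove() calls
pm_runtime_get_sync(), which runtime-resumes the switch; with the ICM hooks
added below, that resume would sit in wait_for_completion_timeout() on
rpm_complete for a device that can no longer send a "device connected"
notification. Completing rpm_complete for the whole subtree first makes
those waits return immediately. The chain being short-circuited, as a
sketch:

	/*
	 * tb_switch_remove(sw)
	 *   -> pm_runtime_get_sync(&sw->dev)
	 *     -> icm_runtime_resume_switch(sw)
	 *       -> wait_for_completion_timeout(&sw->rpm_complete, 500 ms)
	 */
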
@@ -1815,6 +1973,13 @@
 	if (tb->nhi->going_away)
 		return;
 
+	/*
+	 * If RTD3 was vetoed before we entered system suspend, allow it
+	 * again now, before the driver ready message is sent. Firmware
+	 * sends a new RTD3 veto if the veto is still needed after it
+	 * receives the driver ready command.
+	 */
+	icm_veto_end(tb);
 	icm_unplug_children(tb->root_switch);
 
 	/*
@@ -1837,6 +2002,24 @@
 	return 0;
 }
 
+static int icm_runtime_suspend_switch(struct tb_switch *sw)
+{
+	if (tb_route(sw))
+		reinit_completion(&sw->rpm_complete);
+	return 0;
+}
+
+static int icm_runtime_resume_switch(struct tb_switch *sw)
+{
+	if (tb_route(sw)) {
+		if (!wait_for_completion_timeout(&sw->rpm_complete,
+						 msecs_to_jiffies(500))) {
+			dev_dbg(&sw->dev, "runtime resuming timed out\n");
+		}
+	}
+	return 0;
+}
+
 static int icm_runtime_resume(struct tb *tb)
 {
 	/*
@@ -1856,17 +2039,15 @@
 		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
 	else
 		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
-	if (!tb->root_switch)
-		return -ENODEV;
+	if (IS_ERR(tb->root_switch))
+		return PTR_ERR(tb->root_switch);
 
-	/*
-	 * NVM upgrade has not been tested on Apple systems and they
-	 * don't provide images publicly either. To be on the safe side
-	 * prevent root switch NVM upgrade on Macs for now.
-	 */
-	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+	tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
 	tb->root_switch->rpm = icm->rpm;
 
+	if (icm->set_uuid)
+		icm->set_uuid(tb);
+
 	ret = tb_switch_add(tb->root_switch);
 	if (ret) {
 		tb_switch_put(tb->root_switch);
@@ -1916,6 +2097,8 @@
 	.complete = icm_complete,
 	.runtime_suspend = icm_runtime_suspend,
 	.runtime_resume = icm_runtime_resume,
+	.runtime_suspend_switch = icm_runtime_suspend_switch,
+	.runtime_resume_switch = icm_runtime_resume_switch,
 	.handle_event = icm_handle_event,
 	.get_boot_acl = icm_ar_get_boot_acl,
 	.set_boot_acl = icm_ar_set_boot_acl,
@@ -1936,6 +2119,8 @@
 	.complete = icm_complete,
 	.runtime_suspend = icm_runtime_suspend,
 	.runtime_resume = icm_runtime_resume,
+	.runtime_suspend_switch = icm_runtime_suspend_switch,
+	.runtime_resume_switch = icm_runtime_resume_switch,
 	.handle_event = icm_handle_event,
 	.get_boot_acl = icm_ar_get_boot_acl,
 	.set_boot_acl = icm_ar_set_boot_acl,
@@ -1947,6 +2132,19 @@
 	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
 };
 
+/* Ice Lake */
+static const struct tb_cm_ops icm_icl_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.complete = icm_complete,
+	.runtime_suspend = icm_runtime_suspend,
+	.runtime_resume = icm_runtime_resume,
+	.handle_event = icm_handle_event,
+	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
 struct tb *icm_probe(struct tb_nhi *nhi)
 {
 	struct icm *icm;
@@ -1963,6 +2161,7 @@
 	switch (nhi->pdev->device) {
 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		icm->can_upgrade_nvm = true;
 		icm->is_supported = icm_fr_is_supported;
 		icm->get_route = icm_fr_get_route;
 		icm->save_devices = icm_fr_save_devices;
@@ -1980,7 +2179,15 @@
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
 		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+		/*
+		 * NVM upgrade has not been tested on Apple systems and
+		 * they don't provide images publicly either. To be on
+		 * the safe side prevent root switch NVM upgrade on Macs
+		 * for now.
+		 */
+		icm->can_upgrade_nvm = !x86_apple_machine;
 		icm->is_supported = icm_ar_is_supported;
+		icm->cio_reset = icm_ar_cio_reset;
 		icm->get_mode = icm_ar_get_mode;
 		icm->get_route = icm_ar_get_route;
 		icm->save_devices = icm_fr_save_devices;
@@ -1995,7 +2202,9 @@
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
 		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+		icm->can_upgrade_nvm = !x86_apple_machine;
 		icm->is_supported = icm_ar_is_supported;
+		icm->cio_reset = icm_tr_cio_reset;
 		icm->get_mode = icm_ar_get_mode;
 		icm->driver_ready = icm_tr_driver_ready;
 		icm->device_connected = icm_tr_device_connected;
@@ -2004,6 +2213,19 @@
 		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
 		tb->cm_ops = &icm_tr_ops;
 		break;
+
+	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+		icm->is_supported = icm_ar_is_supported;
+		icm->driver_ready = icm_icl_driver_ready;
+		icm->set_uuid = icm_icl_set_uuid;
+		icm->device_connected = icm_icl_device_connected;
+		icm->device_disconnected = icm_tr_device_disconnected;
+		icm->xdomain_connected = icm_tr_xdomain_connected;
+		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+		icm->rtd3_veto = icm_icl_rtd3_veto;
+		tb->cm_ops = &icm_icl_ops;
+		break;
 	}
 
 	if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
new file mode 100644
index 0000000..ae1e926
--- /dev/null
+++ b/drivers/thunderbolt/lc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt link controller support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include "tb.h"
+
+/**
+ * tb_lc_read_uuid() - Read switch UUID from link controller common register
+ * @sw: Switch whose UUID is read
+ * @uuid: UUID is placed here
+ */
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
+{
+	if (!sw->cap_lc)
+		return -EINVAL;
+	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
+}
+
+static int read_lc_desc(struct tb_switch *sw, u32 *desc)
+{
+	if (!sw->cap_lc)
+		return -EINVAL;
+	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
+}
+
+static int find_port_lc_cap(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	int start, phys, ret, size;
+	u32 desc;
+
+	ret = read_lc_desc(sw, &desc);
+	if (ret)
+		return ret;
+
+	/* Start of port LC registers */
+	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+	phys = tb_phy_port_from_link(port->port);
+
+	return sw->cap_lc + start + phys * size;
+}
+
+static int tb_lc_configure_lane(struct tb_port *port, bool configure)
+{
+	bool upstream = tb_is_upstream_port(port);
+	struct tb_switch *sw = port->sw;
+	u32 ctrl, lane;
+	int cap, ret;
+
+	if (sw->generation < 2)
+		return 0;
+
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return cap;
+
+	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+	if (ret)
+		return ret;
+
+	/* Resolve correct lane */
+	if (port->port % 2)
+		lane = TB_LC_SX_CTRL_L1C;
+	else
+		lane = TB_LC_SX_CTRL_L2C;
+
+	if (configure) {
+		ctrl |= lane;
+		if (upstream)
+			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
+	} else {
+		ctrl &= ~lane;
+		if (upstream)
+			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
+	}
+
+	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_link() - Let LC know about configured link
+ * @sw: Switch that is being added
+ *
+ * Informs LC of both parent switch and @sw that there is established
+ * link between the two.
+ */
+int tb_lc_configure_link(struct tb_switch *sw)
+{
+	struct tb_port *up, *down;
+	int ret;
+
+	if (!sw->config.enabled || !tb_route(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+
+	/* Configure parent link toward this switch */
+	ret = tb_lc_configure_lane(down, true);
+	if (ret)
+		return ret;
+
+	/* Configure upstream link from this switch to the parent */
+	ret = tb_lc_configure_lane(up, true);
+	if (ret)
+		tb_lc_configure_lane(down, false);
+
+	return ret;
+}
+
+/**
+ * tb_lc_unconfigure_link() - Let LC know about unconfigured link
+ * @sw: Switch to unconfigure
+ *
+ * Informs LC of both parent switch and @sw that the link between the
+ * two does not exist anymore.
+ */
+void tb_lc_unconfigure_link(struct tb_switch *sw)
+{
+	struct tb_port *up, *down;
+
+	if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+		return;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+
+	tb_lc_configure_lane(up, false);
+	tb_lc_configure_lane(down, false);
+}
+
+/**
+ * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
+ * @sw: Switch to set sleep
+ *
+ * Let the switch link controllers know that the switch is going to
+ * sleep.
+ */
+int tb_lc_set_sleep(struct tb_switch *sw)
+{
+	int start, size, nlc, ret, i;
+	u32 desc;
+
+	if (sw->generation < 2)
+		return 0;
+
+	ret = read_lc_desc(sw, &desc);
+	if (ret)
+		return ret;
+
+	/* Figure out number of link controllers */
+	nlc = desc & TB_LC_DESC_NLC_MASK;
+	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+	/* For each link controller set sleep bit */
+	for (i = 0; i < nlc; i++) {
+		unsigned int offset = sw->cap_lc + start + i * size;
+		u32 ctrl;
+
+		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+				 offset + TB_LC_SX_CTRL, 1);
+		if (ret)
+			return ret;
+
+		ctrl |= TB_LC_SX_CTRL_SLP;
+		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
+				  offset + TB_LC_SX_CTRL, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
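
find_port_lc_cap() locates the per-port LC register block from the packed
TB_LC_DESC dword. A worked sketch with hypothetical field values:

	/*
	 * Suppose TB_LC_DESC decodes to start = 0x80 and per-port
	 * size = 0x10. A lane adapter with physical port number 1 then
	 * has its LC registers at:
	 *
	 *	cap = sw->cap_lc + 0x80 + 1 * 0x10;
	 *
	 * The two lanes of a physical port share one block; port->port % 2
	 * picks TB_LC_SX_CTRL_L1C or TB_LC_SX_CTRL_L2C inside it.
	 */
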
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 5cd6bdf..641b21b 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1,10 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
  *
  * The NHI (native host interface) is the pci device that allows us to send and
  * receive frames from the thunderbolt bus.
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/pm_runtime.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/property.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -26,8 +29,7 @@
  * use this ring for anything else.
  */
 #define RING_E2E_UNUSED_HOPID	2
-/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
-#define RING_FIRST_USABLE_HOPID	8
+#define RING_FIRST_USABLE_HOPID	TB_PATH_MIN_HOPID
 
 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
@@ -95,9 +97,9 @@
 	else
 		new = old & ~mask;
 
-	dev_info(&ring->nhi->pdev->dev,
-		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-		 active ? "enabling" : "disabling", reg, bit, old, new);
+	dev_dbg(&ring->nhi->pdev->dev,
+		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+		active ? "enabling" : "disabling", reg, bit, old, new);
 
 	if (new == old)
 		dev_WARN(&ring->nhi->pdev->dev,
@@ -142,9 +144,20 @@
 	return io;
 }
 
-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
 {
-	iowrite16(value, ring_desc_base(ring) + offset);
+	/*
+	 * The other 16 bits in the register are read-only and writes to
+	 * them are ignored by the hardware, so we can save one ioread32()
+	 * by filling the read-only bits with zeroes.
+	 */
+	iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+	/* See ring_iowrite_cons() above for explanation */
+	iowrite32(prod << 16, ring_desc_base(ring) + 8);
 }
 
 static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
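
The consumer and producer indexes share one 32-bit register at descriptor
base + 8: per the old ring_iowrite16desc() offsets, the consumer index
lives in bits 15:0 and the producer index in bits 31:16. Because the
hardware ignores writes to whichever half is read-only for a given ring, a
full 32-bit write with the other half zeroed replaces a read-modify-write:

	iowrite32(cons, ring_desc_base(ring) + 8);	 /* RX: bits 15:0 */
	iowrite32(prod << 16, ring_desc_base(ring) + 8); /* TX: bits 31:16 */
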
@@ -196,7 +209,10 @@
 			descriptor->sof = frame->sof;
 		}
 		ring->head = (ring->head + 1) % ring->size;
-		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+		if (ring->is_tx)
+			ring_iowrite_prod(ring, ring->head);
+		else
+			ring_iowrite_cons(ring, ring->head);
 	}
 }
 
@@ -476,8 +492,9 @@
 				     void *poll_data)
 {
 	struct tb_ring *ring = NULL;
-	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
-		 transmit ? "TX" : "RX", hop, size);
+
+	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+		transmit ? "TX" : "RX", hop, size);
 
 	/* Tx Ring 2 is reserved for E2E workaround */
 	if (transmit && hop == RING_E2E_UNUSED_HOPID)
@@ -585,8 +602,8 @@
 		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 		goto err;
 	}
-	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+		RING_TYPE(ring), ring->hop);
 
 	if (ring->flags & RING_FLAG_FRAME) {
 		/* Means 4096 */
@@ -647,8 +664,8 @@
 {
 	spin_lock_irq(&ring->nhi->lock);
 	spin_lock(&ring->lock);
-	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
-		 RING_TYPE(ring), ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+		RING_TYPE(ring), ring->hop);
 	if (ring->nhi->going_away)
 		goto err;
 	if (!ring->running) {
@@ -660,7 +677,7 @@
 
 	ring_iowrite32options(ring, 0, 0);
 	ring_iowrite64desc(ring, 0, 0);
-	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+	ring_iowrite32desc(ring, 0, 8);
 	ring_iowrite32desc(ring, 0, 12);
 	ring->head = 0;
 	ring->tail = 0;
@@ -716,10 +733,8 @@
 	ring->descriptors_dma = 0;
 
 
-	dev_info(&ring->nhi->pdev->dev,
-		 "freeing %s %d\n",
-		 RING_TYPE(ring),
-		 ring->hop);
+	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+		ring->hop);
 
 	/**
 	 * ring->work can no longer be scheduled (it is scheduled only
@@ -845,12 +860,52 @@
 	return IRQ_HANDLED;
 }
 
-static int nhi_suspend_noirq(struct device *dev)
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
-	return tb_domain_suspend_noirq(tb);
+	ret = tb_domain_suspend_noirq(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->suspend_noirq) {
+		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+	u8 val;
+
+	/*
+	 * If power rails are sustainable for wakeup from S4 this
+	 * property is set by the BIOS.
+	 */
+	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+		return !!val;
+
+	return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	bool wakeup;
+
+	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+	return __nhi_suspend_noirq(dev, wakeup);
 }
 
 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +928,24 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
 	/*
 	 * Check that the device is still there. It may be that the user
 	 * unplugged last device which causes the host controller to go
 	 * away on PCs.
 	 */
-	if (!pci_device_is_present(pdev))
-		tb->nhi->going_away = true;
-	else
+	if (!pci_device_is_present(pdev)) {
+		nhi->going_away = true;
+	} else {
+		if (nhi->ops && nhi->ops->resume_noirq) {
+			ret = nhi->ops->resume_noirq(nhi);
+			if (ret)
+				return ret;
+		}
 		nhi_enable_int_throttling(tb->nhi);
+	}
 
 	return tb_domain_resume_noirq(tb);
 }
@@ -915,23 +978,43 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
-	return tb_domain_runtime_suspend(tb);
+	ret = tb_domain_runtime_suspend(tb);
+	if (ret)
+		return ret;
+
+	if (nhi->ops && nhi->ops->runtime_suspend) {
+		ret = nhi->ops->runtime_suspend(tb->nhi);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
 static int nhi_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
 
-	nhi_enable_int_throttling(tb->nhi);
+	if (nhi->ops && nhi->ops->runtime_resume) {
+		ret = nhi->ops->runtime_resume(nhi);
+		if (ret)
+			return ret;
+	}
+
+	nhi_enable_int_throttling(nhi);
 	return tb_domain_runtime_resume(tb);
 }
 
 static void nhi_shutdown(struct tb_nhi *nhi)
 {
 	int i;
-	dev_info(&nhi->pdev->dev, "shutdown\n");
+
+	dev_dbg(&nhi->pdev->dev, "shutdown\n");
 
 	for (i = 0; i < nhi->hop_count; i++) {
 		if (nhi->tx_rings[i])
@@ -951,6 +1034,9 @@
 		flush_work(&nhi->interrupt_work);
 	}
 	ida_destroy(&nhi->msix_ida);
+
+	if (nhi->ops && nhi->ops->shutdown)
+		nhi->ops->shutdown(nhi);
 }
 
 static int nhi_init_msi(struct tb_nhi *nhi)
@@ -995,12 +1081,27 @@
 	return 0;
 }
 
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+	u8 val;
+
+	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+		return !!val;
+
+	return true;
+}
+
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct tb_nhi *nhi;
 	struct tb *tb;
 	int res;
 
+	if (!nhi_imr_valid(pdev)) {
+		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+		return -ENODEV;
+	}
+
 	res = pcim_enable_device(pdev);
 	if (res) {
 		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
@@ -1018,6 +1119,7 @@
 		return -ENOMEM;
 
 	nhi->pdev = pdev;
+	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
 	/* cannot fail - table is allocated in pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
@@ -1050,6 +1152,12 @@
 
 	pci_set_master(pdev);
 
+	if (nhi->ops && nhi->ops->init) {
+		res = nhi->ops->init(nhi);
+		if (res)
+			return res;
+	}
+
 	tb = icm_probe(nhi);
 	if (!tb)
 		tb = tb_probe(nhi);
@@ -1059,7 +1167,7 @@
 		return -ENODEV;
 	}
 
-	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
 
 	res = tb_domain_add(tb);
 	if (res) {
@@ -1110,6 +1218,7 @@
 	.restore_noirq = nhi_resume_noirq,
 	.suspend = nhi_suspend,
 	.freeze = nhi_suspend,
+	.poweroff_noirq = nhi_poweroff_noirq,
 	.poweroff = nhi_suspend,
 	.complete = nhi_complete,
 	.runtime_suspend = nhi_runtime_suspend,
@@ -1157,6 +1266,10 @@
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 
 	{ 0,}
 };
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1696a45..b7b9739 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #ifndef DSL3510_H_
@@ -29,6 +30,26 @@
 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
 
+/**
+ * struct tb_nhi_ops - NHI specific optional operations
+ * @init: NHI specific initialization
+ * @suspend_noirq: NHI specific suspend_noirq hook
+ * @resume_noirq: NHI specific resume_noirq hook
+ * @runtime_suspend: NHI specific runtime_suspend hook
+ * @runtime_resume: NHI specific runtime_resume hook
+ * @shutdown: NHI specific shutdown
+ */
+struct tb_nhi_ops {
+	int (*init)(struct tb_nhi *nhi);
+	int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
+	int (*resume_noirq)(struct tb_nhi *nhi);
+	int (*runtime_suspend)(struct tb_nhi *nhi);
+	int (*runtime_resume)(struct tb_nhi *nhi);
+	void (*shutdown)(struct tb_nhi *nhi);
+};
+
+extern const struct tb_nhi_ops icl_nhi_ops;
+
 /*
  * PCI IDs used in this driver from Win Ridge forward. There is no
  * need for the PCI quirk anymore as we will use ICM also on Apple
@@ -50,5 +71,7 @@
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE	0x15ea
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI		0x15eb
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
+#define PCI_DEVICE_ID_INTEL_ICL_NHI1			0x8a0d
+#define PCI_DEVICE_ID_INTEL_ICL_NHI0			0x8a17
 
 #endif
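
The ops table is attached through the PCI ID table's driver_data, and every
call site checks the pointer first, so controllers without special handling
keep a NULL ops and skip all hooks. The dispatch pattern used throughout
nhi.c, as a sketch:

	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;

	if (nhi->ops && nhi->ops->runtime_suspend) {
		ret = nhi->ops->runtime_suspend(nhi);
		if (ret)
			return ret;
	}
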
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
new file mode 100644
index 0000000..6795851
--- /dev/null
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHI specific operations
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+/* Ice Lake specific NHI operations */
+
+#define ICL_LC_MAILBOX_TIMEOUT	500 /* ms */
+
+static int check_for_device(struct device *dev, void *data)
+{
+	return tb_is_switch(dev);
+}
+
+static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
+{
+	struct tb *tb = pci_get_drvdata(nhi->pdev);
+	int ret;
+
+	ret = device_for_each_child(&tb->root_switch->dev, NULL,
+				    check_for_device);
+	return ret > 0;
+}
+
+static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
+{
+	u32 vs_cap;
+
+	/*
+	 * The Thunderbolt host controller is always present in Ice Lake
+	 * but the firmware may not be loaded and running (depending on
+	 * whether there is a device connected and so on). Each time the
+	 * controller is used we need to "Force Power" it first and wait
+	 * for the firmware to indicate it is up and running. This "Force
+	 * Power" is really not about actually powering on/off the
+	 * controller so it is accessible even if "Force Power" is off.
+	 *
+	 * The actual power management happens inside shared ACPI power
+	 * resources using standard ACPI methods.
+	 */
+	pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
+	if (power) {
+		vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
+		vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
+		vs_cap |= VS_CAP_22_FORCE_POWER;
+	} else {
+		vs_cap &= ~VS_CAP_22_FORCE_POWER;
+	}
+	pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
+
+	if (power) {
+		unsigned int retries = 10;
+		u32 val;
+
+		/* Wait until the firmware tells it is up and running */
+		do {
+			pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
+			if (val & VS_CAP_9_FW_READY)
+				return 0;
+			msleep(250);
+		} while (--retries);
+
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
+{
+	u32 data;
+
+	data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
+	pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
+}
+
+static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
+{
+	unsigned long end;
+	u32 data;
+
+	if (!timeout)
+		goto clear;
+
+	end = jiffies + msecs_to_jiffies(timeout);
+	do {
+		pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
+		if (data & VS_CAP_18_DONE)
+			goto clear;
+		msleep(100);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+
+clear:
+	/* Clear the valid bit */
+	pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
+	return 0;
+}
+
+static void icl_nhi_set_ltr(struct tb_nhi *nhi)
+{
+	u32 max_ltr, ltr;
+
+	pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
+	max_ltr &= 0xffff;
+	/* Program the same value for both snoop and no-snoop */
+	ltr = max_ltr << 16 | max_ltr;
+	pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
+}
+
+static int icl_nhi_suspend(struct tb_nhi *nhi)
+{
+	int ret;
+
+	if (icl_nhi_is_device_connected(nhi))
+		return 0;
+
+	/*
+	 * If there is no device connected we need to perform both: a
+	 * handshake through LC mailbox and force power down before
+	 * entering D3.
+	 */
+	icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+	ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return icl_nhi_force_power(nhi, false);
+}
+
+static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
+{
+	enum icl_lc_mailbox_cmd cmd;
+
+	if (!pm_suspend_via_firmware())
+		return icl_nhi_suspend(nhi);
+
+	cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
+	icl_nhi_lc_mailbox_cmd(nhi, cmd);
+	return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+}
+
+static int icl_nhi_resume(struct tb_nhi *nhi)
+{
+	int ret;
+
+	ret = icl_nhi_force_power(nhi, true);
+	if (ret)
+		return ret;
+
+	icl_nhi_set_ltr(nhi);
+	return 0;
+}
+
+static void icl_nhi_shutdown(struct tb_nhi *nhi)
+{
+	icl_nhi_force_power(nhi, false);
+}
+
+const struct tb_nhi_ops icl_nhi_ops = {
+	.init = icl_nhi_resume,
+	.suspend_noirq = icl_nhi_suspend_noirq,
+	.resume_noirq = icl_nhi_resume,
+	.runtime_suspend = icl_nhi_suspend,
+	.runtime_resume = icl_nhi_resume,
+	.shutdown = icl_nhi_shutdown,
+};
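
The LC mailbox handshake writes a 7-bit command plus a valid bit into
VS_CAP_19 and then polls VS_CAP_18_DONE (100 ms steps, 500 ms budget).
Worked encoding for ICL_LC_GO2SX (0x02):

	/*
	 * data  = (0x02 << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;  0x04
	 * data |= VS_CAP_19_VALID;                                     0x05
	 *
	 * so the command phase writes 0x00000005 to VS_CAP_19, and the
	 * completion phase clears the register back to 0.
	 */
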
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index b3e49d1..0d4970d 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -3,6 +3,7 @@
  * Thunderbolt driver - NHI registers
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #ifndef NHI_REGS_H_
@@ -123,4 +124,41 @@
 #define REG_FW_STS_ICM_EN_INVERT	BIT(1)
 #define REG_FW_STS_ICM_EN		BIT(0)
 
+/* ICL NHI VSEC registers */
+
+/* FW ready */
+#define VS_CAP_9			0xc8
+#define VS_CAP_9_FW_READY		BIT(31)
+/* UUID */
+#define VS_CAP_10			0xcc
+#define VS_CAP_11			0xd0
+/* LTR */
+#define VS_CAP_15			0xe0
+#define VS_CAP_16			0xe4
+/* TBT2PCIe */
+#define VS_CAP_18			0xec
+#define VS_CAP_18_DONE			BIT(0)
+/* PCIe2TBT */
+#define VS_CAP_19			0xf0
+#define VS_CAP_19_VALID			BIT(0)
+#define VS_CAP_19_CMD_SHIFT		1
+#define VS_CAP_19_CMD_MASK		GENMASK(7, 1)
+/* Force power */
+#define VS_CAP_22			0xfc
+#define VS_CAP_22_FORCE_POWER		BIT(1)
+#define VS_CAP_22_DMA_DELAY_MASK	GENMASK(31, 24)
+#define VS_CAP_22_DMA_DELAY_SHIFT	24
+
+/**
+ * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
+ * @ICL_LC_GO2SX: Ask LC to enter Sx with wake
+ * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake
+ * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
+ */
+enum icl_lc_mailbox_cmd {
+	ICL_LC_GO2SX = 0x02,
+	ICL_LC_GO2SX_NO_WAKE = 0x03,
+	ICL_LC_PREPARE_FOR_RESET = 0x21,
+};
+
 #endif
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ff49ad8..afe5f83 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -1,62 +1,330 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - path/tunnel functionality
+ * Thunderbolt driver - path/tunnel functionality
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
  */
 
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
 
 #include "tb.h"
 
-
-static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
+static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
 {
-	tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
-		     hop->out_port, hop->next_hop,
-		     hop->enable ? "enabled" : "disabled");
-	tb_port_info(port, "  Weight: %d Priority: %d Credits: %d Drop: %d\n",
-		     hop->weight, hop->priority,
-		     hop->initial_credits, hop->drop_packages);
-	tb_port_info(port, "   Counter enabled: %d Counter index: %d\n",
-		     hop->counter_enable, hop->counter);
-	tb_port_info(port, "  Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
-		     hop->ingress_fc, hop->egress_fc,
-		     hop->ingress_shared_buffer, hop->egress_shared_buffer);
-	tb_port_info(port, "  Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
-		     hop->unknown1, hop->unknown2, hop->unknown3);
+	const struct tb_port *port = hop->in_port;
+
+	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
+		    hop->in_hop_index, regs->out_port, regs->next_hop);
+	tb_port_dbg(port, "  Weight: %d Priority: %d Credits: %d Drop: %d\n",
+		    regs->weight, regs->priority,
+		    regs->initial_credits, regs->drop_packages);
+	tb_port_dbg(port, "   Counter enabled: %d Counter index: %d\n",
+		    regs->counter_enable, regs->counter);
+	tb_port_dbg(port, "  Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+		    regs->ingress_fc, regs->egress_fc,
+		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
+	tb_port_dbg(port, "  Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+		    regs->unknown1, regs->unknown2, regs->unknown3);
+}
+
+static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
+					     int dst_hopid)
+{
+	struct tb_port *port, *out_port = NULL;
+	struct tb_regs_hop hop;
+	struct tb_switch *sw;
+	int i, ret, hopid;
+
+	hopid = src_hopid;
+	port = src;
+
+	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
+		sw = port->sw;
+
+		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
+		if (ret) {
+			tb_port_warn(port, "failed to read path at %d\n", hopid);
+			return NULL;
+		}
+
+		if (!hop.enable)
+			return NULL;
+
+		out_port = &sw->ports[hop.out_port];
+		hopid = hop.next_hop;
+		port = out_port->remote;
+	}
+
+	return out_port && hopid == dst_hopid ? out_port : NULL;
+}
+
+static int tb_path_find_src_hopid(struct tb_port *src,
+				  const struct tb_port *dst, int dst_hopid)
+{
+	struct tb_port *out;
+	int i;
+
+	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
+		out = tb_path_find_dst_port(src, i, dst_hopid);
+		if (out == dst)
+			return i;
+	}
+
+	return 0;
 }
 
 /**
- * tb_path_alloc() - allocate a thunderbolt path
+ * tb_path_discover() - Discover a path
+ * @src: First input port of a path
+ * @src_hopid: Starting HopID of a path (%-1 if don't care)
+ * @dst: Expected destination port of the path (%NULL if don't care)
+ * @dst_hopid: HopID to the @dst (%-1 if don't care)
+ * @last: Last port is filled here if not %NULL
+ * @name: Name of the path
  *
- * Return: Returns a tb_path on success or NULL on failure.
+ * Follows a path starting from @src and @src_hopid to the last output
+ * port of the path. Allocates HopIDs for the visited ports. Call
+ * tb_path_free() to release the path and allocated HopIDs when the path
+ * is not needed anymore.
+ *
+ * Note that this function also discovers incomplete paths, so the caller
+ * should check that the @dst port is the expected one. If it is not, the
+ * path can be cleaned up by calling tb_path_deactivate() before
+ * tb_path_free().
+ *
+ * Return: Discovered path on success, %NULL in case of failure
  */
-struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+				 struct tb_port *dst, int dst_hopid,
+				 struct tb_port **last, const char *name)
 {
-	struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
+	struct tb_port *out_port;
+	struct tb_regs_hop hop;
+	struct tb_path *path;
+	struct tb_switch *sw;
+	struct tb_port *p;
+	size_t num_hops;
+	int ret, i, h;
+
+	if (src_hopid < 0 && dst) {
+		/*
+		 * For incomplete paths the intermediate HopID can be
+		 * different from the one used by the protocol adapter
+		 * so in that case find a path that ends on @dst with
+		 * matching @dst_hopid. That should give us the correct
+		 * HopID for the @src.
+		 */
+		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
+		if (!src_hopid)
+			return NULL;
+	}
+
+	p = src;
+	h = src_hopid;
+	num_hops = 0;
+
+	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
+		sw = p->sw;
+
+		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+		if (ret) {
+			tb_port_warn(p, "failed to read path at %d\n", h);
+			return NULL;
+		}
+
+		/* If the hop is not enabled we got an incomplete path */
+		if (!hop.enable)
+			break;
+
+		out_port = &sw->ports[hop.out_port];
+		if (last)
+			*last = out_port;
+
+		h = hop.next_hop;
+		p = out_port->remote;
+		num_hops++;
+	}
+
+	path = kzalloc(sizeof(*path), GFP_KERNEL);
 	if (!path)
 		return NULL;
+
+	path->name = name;
+	path->tb = src->sw->tb;
+	path->path_length = num_hops;
+	path->activated = true;
+
 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
 	if (!path->hops) {
 		kfree(path);
 		return NULL;
 	}
-	path->tb = tb;
-	path->path_length = num_hops;
+
+	p = src;
+	h = src_hopid;
+
+	for (i = 0; i < num_hops; i++) {
+		int next_hop;
+
+		sw = p->sw;
+
+		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+		if (ret) {
+			tb_port_warn(p, "failed to read path at %d\n", h);
+			goto err;
+		}
+
+		if (tb_port_alloc_in_hopid(p, h, h) < 0)
+			goto err;
+
+		out_port = &sw->ports[hop.out_port];
+		next_hop = hop.next_hop;
+
+		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+			tb_port_release_in_hopid(p, h);
+			goto err;
+		}
+
+		path->hops[i].in_port = p;
+		path->hops[i].in_hop_index = h;
+		path->hops[i].in_counter_index = -1;
+		path->hops[i].out_port = out_port;
+		path->hops[i].next_hop_index = next_hop;
+
+		h = next_hop;
+		p = out_port->remote;
+	}
+
 	return path;
+
+err:
+	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
+		     src_hopid);
+	tb_path_free(path);
+	return NULL;
 }
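
A usage sketch of the discovery flow the kernel-doc above describes; the "in"
and "out" ports and HopID 8 (the first non-reserved value, per
TB_PATH_MIN_HOPID) are assumptions for illustration:

	struct tb_port *last;
	struct tb_path *path;

	path = tb_path_discover(in, 8, out, 8, &last, "Example");
	if (path && last != out) {
		/* Unexpected destination: deactivate before freeing */
		tb_path_deactivate(path);
		tb_path_free(path);
		path = NULL;
	}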
 
 /**
- * tb_path_free() - free a deactivated path
+ * tb_path_alloc() - allocate a thunderbolt path between two ports
+ * @tb: Domain pointer
+ * @src: Source port of the path
+ * @src_hopid: HopID used for the first ingress port in the path
+ * @dst: Destination port of the path
+ * @dst_hopid: HopID used for the last egress port in the path
+ * @link_nr: Preferred link if there are dual links on the path
+ * @name: Name of the path
+ *
+ * Creates a path between two ports starting with given @src_hopid. Reserves
+ * HopIDs for each port (they can be different from @src_hopid depending on
+ * how many HopIDs each port has already reserved). If there are dual
+ * links on the path, prioritizes using @link_nr.
+ *
+ * Return: Returns a tb_path on success or %NULL on failure.
+ */
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+			      struct tb_port *dst, int dst_hopid, int link_nr,
+			      const char *name)
+{
+	struct tb_port *in_port, *out_port;
+	int in_hopid, out_hopid;
+	struct tb_path *path;
+	size_t num_hops;
+	int i, ret;
+
+	path = kzalloc(sizeof(*path), GFP_KERNEL);
+	if (!path)
+		return NULL;
+
+	/*
+	 * Number of hops on a path is the distance between the two
+	 * switches plus the source adapter port.
+	 */
+	num_hops = abs(tb_route_length(tb_route(src->sw)) -
+		       tb_route_length(tb_route(dst->sw))) + 1;
+
+	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+	if (!path->hops) {
+		kfree(path);
+		return NULL;
+	}
+
+	in_hopid = src_hopid;
+	out_port = NULL;
+
+	for (i = 0; i < num_hops; i++) {
+		in_port = tb_next_port_on_path(src, dst, out_port);
+		if (!in_port)
+			goto err;
+
+		if (in_port->dual_link_port && in_port->link_nr != link_nr)
+			in_port = in_port->dual_link_port;
+
+		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
+		if (ret < 0)
+			goto err;
+		in_hopid = ret;
+
+		out_port = tb_next_port_on_path(src, dst, in_port);
+		if (!out_port)
+			goto err;
+
+		if (out_port->dual_link_port && out_port->link_nr != link_nr)
+			out_port = out_port->dual_link_port;
+
+		if (i == num_hops - 1)
+			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
+						      dst_hopid);
+		else
+			ret = tb_port_alloc_out_hopid(out_port, -1, -1);
+
+		if (ret < 0)
+			goto err;
+		out_hopid = ret;
+
+		path->hops[i].in_hop_index = in_hopid;
+		path->hops[i].in_port = in_port;
+		path->hops[i].in_counter_index = -1;
+		path->hops[i].out_port = out_port;
+		path->hops[i].next_hop_index = out_hopid;
+
+		in_hopid = out_hopid;
+	}
+
+	path->tb = tb;
+	path->path_length = num_hops;
+	path->name = name;
+
+	return path;
+
+err:
+	tb_path_free(path);
+	return NULL;
+}
+
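
A short usage sketch (the "down"/"up" adapter ports and HopID 8 are assumed
values; tunnel code is the intended caller):

	struct tb_path *path;

	/* Reserve HopID 8 on both ends, preferring the first link */
	path = tb_path_alloc(tb, down, 8, up, 8, 0, "PCIe Down");
	if (!path)
		return -ENOMEM;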
+/**
+ * tb_path_free() - free a path
+ * @path: Path to free
+ *
+ * Frees a path. The path does not need to be deactivated.
  */
 void tb_path_free(struct tb_path *path)
 {
-	if (path->activated) {
-		tb_WARN(path->tb, "trying to free an activated path\n")
-		return;
+	int i;
+
+	for (i = 0; i < path->path_length; i++) {
+		const struct tb_path_hop *hop = &path->hops[i];
+
+		if (hop->in_port)
+			tb_port_release_in_hopid(hop->in_port,
+						 hop->in_hop_index);
+		if (hop->out_port)
+			tb_port_release_out_hopid(hop->out_port,
+						  hop->next_hop_index);
 	}
+
 	kfree(path->hops);
 	kfree(path);
 }
@@ -74,14 +342,65 @@
 	}
 }
 
+static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
+				    bool clear_fc)
+{
+	struct tb_regs_hop hop;
+	ktime_t timeout;
+	int ret;
+
+	/* Disable the path */
+	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+	if (ret)
+		return ret;
+
+	/* Already disabled */
+	if (!hop.enable)
+		return 0;
+
+	hop.enable = 0;
+
+	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+	if (ret)
+		return ret;
+
+	/* Wait until it is drained */
+	timeout = ktime_add_ms(ktime_get(), 500);
+	do {
+		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+		if (ret)
+			return ret;
+
+		if (!hop.pending) {
+			if (clear_fc) {
+				/* Clear flow control */
+				hop.ingress_fc = 0;
+				hop.egress_fc = 0;
+				hop.ingress_shared_buffer = 0;
+				hop.egress_shared_buffer = 0;
+
+				return tb_port_write(port, &hop, TB_CFG_HOPS,
+						     2 * hop_index, 2);
+			}
+
+			return 0;
+		}
+
+		usleep_range(10, 20);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
 static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
 {
 	int i, res;
-	struct tb_regs_hop hop = { };
+
 	for (i = first_hop; i < path->path_length; i++) {
-		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
-				    2 * path->hops[i].in_hop_index, 2);
-		if (res)
+		res = __tb_path_deactivate_hop(path->hops[i].in_port,
+					       path->hops[i].in_hop_index,
+					       path->clear_fc);
+		if (res && res != -ENODEV)
 			tb_port_warn(path->hops[i].in_port,
 				     "hop deactivation failed for hop %d, index %d\n",
 				     i, path->hops[i].in_hop_index);
@@ -94,12 +413,12 @@
 		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
 		return;
 	}
-	tb_info(path->tb,
-		"deactivating path from %llx:%x to %llx:%x\n",
-		tb_route(path->hops[0].in_port->sw),
-		path->hops[0].in_port->port,
-		tb_route(path->hops[path->path_length - 1].out_port->sw),
-		path->hops[path->path_length - 1].out_port->port);
+	tb_dbg(path->tb,
+	       "deactivating %s path from %llx:%x to %llx:%x\n",
+	       path->name, tb_route(path->hops[0].in_port->sw),
+	       path->hops[0].in_port->port,
+	       tb_route(path->hops[path->path_length - 1].out_port->sw),
+	       path->hops[path->path_length - 1].out_port->port);
 	__tb_path_deactivate_hops(path, 0);
 	__tb_path_deallocate_nfc(path, 0);
 	path->activated = false;
@@ -122,12 +441,12 @@
 		return -EINVAL;
 	}
 
-	tb_info(path->tb,
-		"activating path from %llx:%x to %llx:%x\n",
-		tb_route(path->hops[0].in_port->sw),
-		path->hops[0].in_port->port,
-		tb_route(path->hops[path->path_length - 1].out_port->sw),
-		path->hops[path->path_length - 1].out_port->port);
+	tb_dbg(path->tb,
+	       "activating %s path from %llx:%x to %llx:%x\n",
+	       path->name, tb_route(path->hops[0].in_port->sw),
+	       path->hops[0].in_port->port,
+	       tb_route(path->hops[path->path_length - 1].out_port->sw),
+	       path->hops[path->path_length - 1].out_port->port);
 
 	/* Clear counters. */
 	for (i = path->path_length - 1; i >= 0; i--) {
@@ -153,30 +472,14 @@
 	for (i = path->path_length - 1; i >= 0; i--) {
 		struct tb_regs_hop hop = { 0 };
 
-		/*
-		 * We do (currently) not tear down paths setup by the firmeware.
-		 * If a firmware device is unplugged and plugged in again then
-		 * it can happen that we reuse some of the hops from the (now
-		 * defunct) firmeware path. This causes the hotplug operation to
-		 * fail (the pci device does not show up). Clearing the hop
-		 * before overwriting it fixes the problem.
-		 *
-		 * Should be removed once we discover and tear down firmeware
-		 * paths.
-		 */
-		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
-				    2 * path->hops[i].in_hop_index, 2);
-		if (res) {
-			__tb_path_deactivate_hops(path, i);
-			__tb_path_deallocate_nfc(path, 0);
-			goto err;
-		}
+		/* If it is left active deactivate it first */
+		__tb_path_deactivate_hop(path->hops[i].in_port,
+				path->hops[i].in_hop_index, path->clear_fc);
 
 		/* dword 0 */
 		hop.next_hop = path->hops[i].next_hop_index;
 		hop.out_port = path->hops[i].out_port->port;
-		/* TODO: figure out why these are good values */
-		hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
+		hop.initial_credits = path->hops[i].initial_credits;
 		hop.unknown1 = 0;
 		hop.enable = 1;
 
@@ -198,9 +501,8 @@
 					    & out_mask;
 		hop.unknown3 = 0;
 
-		tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d",
-			     i, path->hops[i].in_hop_index);
-		tb_dump_hop(path->hops[i].in_port, &hop);
+		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
+		tb_dump_hop(&path->hops[i], &hop);
 		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
 				    2 * path->hops[i].in_hop_index, 2);
 		if (res) {
@@ -210,7 +512,7 @@
 		}
 	}
 	path->activated = true;
-	tb_info(path->tb, "path activation complete\n");
+	tb_dbg(path->tb, "path activation complete\n");
 	return 0;
 err:
 	tb_WARN(path->tb, "path activation failed\n");
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index 8fe913a..d5b0cdb 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Thunderbolt XDomain property support
  *
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/err.h>
@@ -179,6 +176,10 @@
 	} else {
 		dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
 				    GFP_KERNEL);
+		if (!dir->uuid) {
+			tb_property_free_dir(dir);
+			return NULL;
+		}
 		content_offset = dir_offset + 4;
 		content_len = dir_len - 4; /* Length includes UUID */
 	}
@@ -551,6 +552,11 @@
 
 	property->length = size / 4;
 	property->value.data = kzalloc(size, GFP_KERNEL);
+	if (!property->value.data) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
 	memcpy(property->value.data, buf, buflen);
 
 	list_add_tail(&property->list, &parent->properties);
@@ -581,7 +587,12 @@
 		return -ENOMEM;
 
 	property->length = size / 4;
-	property->value.data = kzalloc(size, GFP_KERNEL);
+	property->value.text = kzalloc(size, GFP_KERNEL);
+	if (!property->value.text) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
 	strcpy(property->value.text, text);
 
 	list_add_tail(&property->list, &parent->properties);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index dd9ae6f..c5974c9 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,23 +1,22 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - switch/port utility functions
+ * Thunderbolt driver - switch/port utility functions
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #include <linux/delay.h>
 #include <linux/idr.h>
 #include <linux/nvmem-provider.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include "tb.h"
 
-/* Switch authorization from userspace is serialized by this lock */
-static DEFINE_MUTEX(switch_lock);
-
 /* Switch NVM support */
 
 #define NVM_DEVID		0x05
@@ -169,7 +168,7 @@
 
 static int nvm_authenticate_host(struct tb_switch *sw)
 {
-	int ret;
+	int ret = 0;
 
 	/*
 	 * Root switch NVM upgrade requires that we disconnect the
@@ -177,6 +176,8 @@
 	 * already).
 	 */
 	if (!sw->safe_mode) {
+		u32 status;
+
 		ret = tb_domain_disconnect_all_paths(sw->tb);
 		if (ret)
 			return ret;
@@ -185,7 +186,16 @@
 		 * everything goes well so getting timeout is expected.
 		 */
 		ret = dma_port_flash_update_auth(sw->dma_port);
-		return ret == -ETIMEDOUT ? 0 : ret;
+		if (!ret || ret == -ETIMEDOUT)
+			return 0;
+
+		/*
+		 * Any error from update auth operation requires power
+		 * cycling of the host router.
+		 */
+		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+			nvm_set_auth_status(sw, status);
 	}
 
 	/*
@@ -193,7 +203,7 @@
 	 * switch.
 	 */
 	dma_port_power_cycle(sw->dma_port);
-	return 0;
+	return ret;
 }
 
 static int nvm_authenticate_device(struct tb_switch *sw)
@@ -201,8 +211,16 @@
 	int ret, retries = 10;
 
 	ret = dma_port_flash_update_auth(sw->dma_port);
-	if (ret && ret != -ETIMEDOUT)
+	switch (ret) {
+	case 0:
+	case -ETIMEDOUT:
+	case -EACCES:
+	case -EINVAL:
+		/* Power cycle is required */
+		break;
+	default:
 		return ret;
+	}
 
 	/*
 	 * Poll here for the authentication status. It takes some time
@@ -240,7 +258,16 @@
 	int ret;
 
 	pm_runtime_get_sync(&sw->dev);
+
+	if (!mutex_trylock(&sw->tb->lock)) {
+		ret = restart_syscall();
+		goto out;
+	}
+
 	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+	mutex_unlock(&sw->tb->lock);
+
+out:
 	pm_runtime_mark_last_busy(&sw->dev);
 	pm_runtime_put_autosuspend(&sw->dev);
 
@@ -253,8 +280,8 @@
 	struct tb_switch *sw = priv;
 	int ret = 0;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	/*
 	 * Since writing the NVM image might require some special steps,
@@ -274,7 +301,7 @@
 	memcpy(sw->nvm->buf + offset, val, bytes);
 
 unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
@@ -356,17 +383,16 @@
 		nvm->active = nvm_dev;
 	}
 
-	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
-	if (IS_ERR(nvm_dev)) {
-		ret = PTR_ERR(nvm_dev);
-		goto err_nvm_active;
+	if (!sw->no_nvm_upgrade) {
+		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_nvm_active;
+		}
+		nvm->non_active = nvm_dev;
 	}
-	nvm->non_active = nvm_dev;
 
-	mutex_lock(&switch_lock);
 	sw->nvm = nvm;
-	mutex_unlock(&switch_lock);
-
 	return 0;
 
 err_nvm_active:
@@ -383,10 +409,8 @@
 {
 	struct tb_switch_nvm *nvm;
 
-	mutex_lock(&switch_lock);
 	nvm = sw->nvm;
 	sw->nvm = NULL;
-	mutex_unlock(&switch_lock);
 
 	if (!nvm)
 		return;
@@ -395,7 +419,8 @@
 	if (!nvm->authenticating)
 		nvm_clear_auth_status(sw);
 
-	nvmem_unregister(nvm->non_active);
+	if (nvm->non_active)
+		nvmem_unregister(nvm->non_active);
 	if (nvm->active)
 		nvmem_unregister(nvm->active);
 	ida_simple_remove(&nvm_ida, nvm->id);
@@ -436,15 +461,15 @@
 
 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
 {
-	tb_info(tb,
-		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
-		port->port_number, port->vendor_id, port->device_id,
-		port->revision, port->thunderbolt_version, tb_port_type(port),
-		port->type);
-	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
-		port->max_in_hop_id, port->max_out_hop_id);
-	tb_info(tb, "  Max counters: %d\n", port->max_counters);
-	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
+	tb_dbg(tb,
+	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+	       port->port_number, port->vendor_id, port->device_id,
+	       port->revision, port->thunderbolt_version, tb_port_type(port),
+	       port->type);
+	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
+	       port->max_in_hop_id, port->max_out_hop_id);
+	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
+	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
 }
 
 /**
@@ -499,23 +524,22 @@
 		if (state < 0)
 			return state;
 		if (state == TB_PORT_DISABLED) {
-			tb_port_info(port, "is disabled (state: 0)\n");
+			tb_port_dbg(port, "is disabled (state: 0)\n");
 			return 0;
 		}
 		if (state == TB_PORT_UNPLUGGED) {
 			if (wait_if_unplugged) {
 				/* used during resume */
-				tb_port_info(port,
-					     "is unplugged (state: 7), retrying...\n");
+				tb_port_dbg(port,
+					    "is unplugged (state: 7), retrying...\n");
 				msleep(100);
 				continue;
 			}
-			tb_port_info(port, "is unplugged (state: 7)\n");
+			tb_port_dbg(port, "is unplugged (state: 7)\n");
 			return 0;
 		}
 		if (state == TB_PORT_UP) {
-			tb_port_info(port,
-				     "is connected, link is up (state: 2)\n");
+			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
 			return 1;
 		}
 
@@ -523,9 +547,9 @@
 		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 		 * time.
 		 */
-		tb_port_info(port,
-			     "is connected, link is not up (state: %d), retrying...\n",
-			     state);
+		tb_port_dbg(port,
+			    "is connected, link is not up (state: %d), retrying...\n",
+			    state);
 		msleep(100);
 	}
 	tb_port_warn(port,
@@ -543,19 +567,47 @@
  */
 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 {
-	if (credits == 0)
+	u32 nfc_credits;
+
+	if (credits == 0 || port->sw->is_unplugged)
 		return 0;
-	tb_port_info(port,
-		     "adding %#x NFC credits (%#x -> %#x)",
-		     credits,
-		     port->config.nfc_credits,
-		     port->config.nfc_credits + credits);
-	port->config.nfc_credits += credits;
+
+	nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+	nfc_credits += credits;
+
+	tb_port_dbg(port, "adding %d NFC credits to %lu",
+		    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+
+	port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+	port->config.nfc_credits |= nfc_credits;
+
 	return tb_port_write(port, &port->config.nfc_credits,
 			     TB_CFG_PORT, 4, 1);
 }
 
 /**
+ * tb_port_set_initial_credits() - Set initial port link credits allocated
+ * @port: Port to set the initial credits for
+ * @credits: Number of credits to allocate
+ *
+ * Set initial credits value to be used for ingress shared buffering.
+ */
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
+{
+	u32 data;
+	int ret;
+
+	ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+	if (ret)
+		return ret;
+
+	data &= ~TB_PORT_LCA_MASK;
+	data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+
+	return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+}
+
+/**
  * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
  *
  * Return: Returns 0 on success or an error code on failure.
@@ -563,7 +615,7 @@
 int tb_port_clear_counter(struct tb_port *port, int counter)
 {
 	u32 zero[3] = { 0, 0, 0 };
-	tb_port_info(port, "clearing counter %d\n", counter);
+	tb_port_dbg(port, "clearing counter %d\n", counter);
 	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 }
 
@@ -581,8 +633,14 @@
 	int cap;
 
 	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
-	if (res)
+	if (res) {
+		if (res == -ENODEV) {
+			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
+			       port->port);
+			return 0;
+		}
 		return res;
+	}
 
 	/* Port 0 is the switch itself and has no PHY. */
 	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
@@ -592,33 +650,323 @@
 			port->cap_phy = cap;
 		else
 			tb_port_WARN(port, "non switch port without a PHY\n");
+	} else if (port->port != 0) {
+		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+		if (cap > 0)
+			port->cap_adap = cap;
 	}
 
 	tb_dump_port(port->sw->tb, &port->config);
 
-	/* TODO: Read dual link port, DP port and more from EEPROM. */
+	/* Control port does not need HopID allocation */
+	if (port->port) {
+		ida_init(&port->in_hopids);
+		ida_init(&port->out_hopids);
+	}
+
 	return 0;
 
 }
 
+static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
+			       int max_hopid)
+{
+	int port_max_hopid;
+	struct ida *ida;
+
+	if (in) {
+		port_max_hopid = port->config.max_in_hop_id;
+		ida = &port->in_hopids;
+	} else {
+		port_max_hopid = port->config.max_out_hop_id;
+		ida = &port->out_hopids;
+	}
+
+	/* HopIDs 0-7 are reserved */
+	if (min_hopid < TB_PATH_MIN_HOPID)
+		min_hopid = TB_PATH_MIN_HOPID;
+
+	if (max_hopid < 0 || max_hopid > port_max_hopid)
+		max_hopid = port_max_hopid;
+
+	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+}
+
+/**
+ * tb_port_alloc_in_hopid() - Allocate input HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable input HopID
+ * @max_hopid: Maximum acceptable input HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_alloc_out_hopid() - Allocate output HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable output HopID
+ * @max_hopid: Maximum acceptable output HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_release_in_hopid() - Release allocated input HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_in_hopid(struct tb_port *port, int hopid)
+{
+	ida_simple_remove(&port->in_hopids, hopid);
+}
+
+/**
+ * tb_port_release_out_hopid() - Release allocated output HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_out_hopid(struct tb_port *port, int hopid)
+{
+	ida_simple_remove(&port->out_hopids, hopid);
+}
+
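
A sketch of the allocate/release pairing (the "port" pointer is assumed;
HopID 8 is the smallest non-reserved value):

	int hopid;

	/* Ask for exactly HopID 8 on the input side */
	hopid = tb_port_alloc_in_hopid(port, 8, 8);
	if (hopid < 0)
		return hopid;
	/* ... program the hop ... */
	tb_port_release_in_hopid(port, hopid);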
+/**
+ * tb_next_port_on_path() - Return next port for given port on a path
+ * @start: Start port of the walk
+ * @end: End port of the walk
+ * @prev: Previous port (%NULL if this is the first)
+ *
+ * This function can be used to walk from one port to another if they
+ * are connected through zero or more switches. If @prev is a dual
+ * link port, the function follows that link and returns the other end
+ * of that same link.
+ *
+ * If the @end port has been reached, return %NULL.
+ *
+ * Domain tb->lock must be held when this function is called.
+ */
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+				     struct tb_port *prev)
+{
+	struct tb_port *next;
+
+	if (!prev)
+		return start;
+
+	if (prev->sw == end->sw) {
+		if (prev == end)
+			return NULL;
+		return end;
+	}
+
+	if (start->sw->config.depth < end->sw->config.depth) {
+		if (prev->remote &&
+		    prev->remote->sw->config.depth > prev->sw->config.depth)
+			next = prev->remote;
+		else
+			next = tb_port_at(tb_route(end->sw), prev->sw);
+	} else {
+		if (tb_is_upstream_port(prev)) {
+			next = prev->remote;
+		} else {
+			next = tb_upstream_port(prev->sw);
+			/*
+			 * Keep the same link if prev and next are both
+			 * dual link ports.
+			 */
+			if (next->dual_link_port &&
+			    next->link_nr != prev->link_nr) {
+				next = next->dual_link_port;
+			}
+		}
+	}
+
+	return next;
+}
+
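
The walk can be driven with a simple loop; a sketch assuming "src" and "dst"
are the endpoints of an established path:

	struct tb_port *p = NULL;

	/* Visits src first and dst last, then returns NULL */
	while ((p = tb_next_port_on_path(src, dst, p)))
		tb_port_dbg(p, "on path\n");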
+/**
+ * tb_port_is_enabled() - Is the adapter port enabled
+ * @port: Port to check
+ */
+bool tb_port_is_enabled(struct tb_port *port)
+{
+	switch (port->config.type) {
+	case TB_TYPE_PCIE_UP:
+	case TB_TYPE_PCIE_DOWN:
+		return tb_pci_port_is_enabled(port);
+
+	case TB_TYPE_DP_HDMI_IN:
+	case TB_TYPE_DP_HDMI_OUT:
+		return tb_dp_port_is_enabled(port);
+
+	default:
+		return false;
+	}
+}
+
+/**
+ * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
+ * @port: PCIe port to check
+ */
+bool tb_pci_port_is_enabled(struct tb_port *port)
+{
+	u32 data;
+
+	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+		return false;
+
+	return !!(data & TB_PCI_EN);
+}
+
+/**
+ * tb_pci_port_enable() - Enable PCIe adapter port
+ * @port: PCIe port to enable
+ * @enable: Enable/disable the PCIe adapter
+ */
+int tb_pci_port_enable(struct tb_port *port, bool enable)
+{
+	u32 word = enable ? TB_PCI_EN : 0x0;
+	if (!port->cap_adap)
+		return -ENXIO;
+	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+}
+
+/**
+ * tb_dp_port_hpd_is_active() - Is HPD already active
+ * @port: DP out port to check
+ *
+ * Checks if the DP OUT adapter port has the HPD bit already set.
+ */
+int tb_dp_port_hpd_is_active(struct tb_port *port)
+{
+	u32 data;
+	int ret;
+
+	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+	if (ret)
+		return ret;
+
+	return !!(data & TB_DP_HDP);
+}
+
+/**
+ * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
+ * @port: Port to clear HPD
+ *
+ * If the DP IN port has HPD set, this function can be used to clear it.
+ */
+int tb_dp_port_hpd_clear(struct tb_port *port)
+{
+	u32 data;
+	int ret;
+
+	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+	if (ret)
+		return ret;
+
+	data |= TB_DP_HPDC;
+	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+}
+
+/**
+ * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
+ * @port: DP IN/OUT port to set hops
+ * @video: Video Hop ID
+ * @aux_tx: AUX TX Hop ID
+ * @aux_rx: AUX RX Hop ID
+ *
+ * Programs specified Hop IDs for DP IN/OUT port.
+ */
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+			unsigned int aux_tx, unsigned int aux_rx)
+{
+	u32 data[2];
+	int ret;
+
+	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+			   ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
+	data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+
+	data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
+	data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
+	data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+
+	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
+			     ARRAY_SIZE(data));
+}
+
+/**
+ * tb_dp_port_is_enabled() - Is DP adapter port enabled
+ * @port: DP adapter port to check
+ */
+bool tb_dp_port_is_enabled(struct tb_port *port)
+{
+	u32 data[2];
+
+	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+			 ARRAY_SIZE(data)))
+		return false;
+
+	return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+}
+
+/**
+ * tb_dp_port_enable() - Enables/disables DP paths of a port
+ * @port: DP IN/OUT port
+ * @enable: Enable/disable DP path
+ *
+ * Once Hop IDs are programmed, DP paths can be enabled or disabled by
+ * calling this function.
+ */
+int tb_dp_port_enable(struct tb_port *port, bool enable)
+{
+	u32 data[2];
+	int ret;
+
+	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+			   ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	if (enable)
+		data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+	else
+		data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+
+	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
+			     ARRAY_SIZE(data));
+}
+
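
Putting the DP adapter helpers together, a sketch of the program-then-enable
order the kernel-doc above implies (the HopID values are illustrative):

	int ret;

	/* Video on HopID 9, AUX TX/RX on HopID 8 (assumed values) */
	ret = tb_dp_port_set_hops(port, 9, 8, 8);
	if (!ret)
		ret = tb_dp_port_enable(port, true);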
 /* switch utility functions */
 
 static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
 {
-	tb_info(tb,
-		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
-		sw->vendor_id, sw->device_id, sw->revision,
-		sw->thunderbolt_version);
-	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
-	tb_info(tb, "  Config:\n");
-	tb_info(tb,
+	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+	       sw->vendor_id, sw->device_id, sw->revision,
+	       sw->thunderbolt_version);
+	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
+	tb_dbg(tb, "  Config:\n");
+	tb_dbg(tb,
 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
-		sw->upstream_port_number, sw->depth,
-		(((u64) sw->route_hi) << 32) | sw->route_lo,
-		sw->enabled, sw->plug_events_delay);
-	tb_info(tb,
-		"   unknown1: %#x unknown4: %#x\n",
-		sw->__unknown1, sw->__unknown4);
+	       sw->upstream_port_number, sw->depth,
+	       (((u64) sw->route_hi) << 32) | sw->route_lo,
+	       sw->enabled, sw->plug_events_delay);
+	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
+	       sw->__unknown1, sw->__unknown4);
 }
 
 /**
@@ -634,7 +982,7 @@
 		header.route_lo = route,
 		header.enabled = true,
 	};
-	tb_info(tb, "resetting switch at %llx\n", route);
+	tb_dbg(tb, "resetting switch at %llx\n", route);
 	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
 			0, 2, 2, 2);
 	if (res.err)
@@ -645,24 +993,6 @@
 	return res.err;
 }
 
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
-{
-	u8 next_port = route; /*
-			       * Routes use a stride of 8 bits,
-			       * eventhough a port index has 6 bits at most.
-			       * */
-	if (route == 0)
-		return sw;
-	if (next_port > sw->config.max_port_number)
-		return NULL;
-	if (tb_is_upstream_port(&sw->ports[next_port]))
-		return NULL;
-	if (!sw->ports[next_port].remote)
-		return NULL;
-	return get_switch_at_route(sw->ports[next_port].remote->sw,
-				   route >> TB_ROUTE_SHIFT);
-}
-
 /**
  * tb_plug_events_active() - enable/disable plug events on a switch
  *
@@ -717,20 +1047,12 @@
 {
 	int ret = -EINVAL;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->authorized)
 		goto unlock;
 
-	/*
-	 * Make sure there is no PCIe rescan ongoing when a new PCIe
-	 * tunnel is created. Otherwise the PCIe rescan code might find
-	 * the new tunnel too early.
-	 */
-	pci_lock_rescan_remove();
-	pm_runtime_get_sync(&sw->dev);
-
 	switch (val) {
 	/* Approve switch */
 	case 1:
@@ -750,10 +1072,6 @@
 		break;
 	}
 
-	pm_runtime_mark_last_busy(&sw->dev);
-	pm_runtime_put_autosuspend(&sw->dev);
-	pci_unlock_rescan_remove();
-
 	if (!ret) {
 		sw->authorized = val;
 		/* Notify status change to the userspace */
@@ -761,7 +1079,7 @@
 	}
 
 unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 
@@ -779,7 +1097,10 @@
 	if (val > 2)
 		return -EINVAL;
 
+	pm_runtime_get_sync(&sw->dev);
 	ret = tb_switch_set_authorized(sw, val);
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
 
 	return ret ? ret : count;
 }
@@ -818,15 +1139,15 @@
 	struct tb_switch *sw = tb_to_switch(dev);
 	ssize_t ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->key)
 		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
 	else
 		ret = sprintf(buf, "\n");
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 
@@ -843,8 +1164,8 @@
 	else if (hex2bin(key, buf, sizeof(key)))
 		return -EINVAL;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->authorized) {
 		ret = -EBUSY;
@@ -859,7 +1180,7 @@
 		}
 	}
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 	return ret;
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
@@ -905,8 +1226,12 @@
 	bool val;
 	int ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	pm_runtime_get_sync(&sw->dev);
+
+	if (!mutex_trylock(&sw->tb->lock)) {
+		ret = restart_syscall();
+		goto exit_rpm;
+	}
 
 	/* If NVMem devices are not yet added */
 	if (!sw->nvm) {
@@ -927,13 +1252,9 @@
 			goto exit_unlock;
 		}
 
-		pm_runtime_get_sync(&sw->dev);
 		ret = nvm_validate_and_write(sw);
-		if (ret) {
-			pm_runtime_mark_last_busy(&sw->dev);
-			pm_runtime_put_autosuspend(&sw->dev);
+		if (ret)
 			goto exit_unlock;
-		}
 
 		sw->nvm->authenticating = true;
 
@@ -944,17 +1265,16 @@
 			 */
 			nvm_authenticate_start(sw);
 			ret = nvm_authenticate_host(sw);
-			if (ret)
-				nvm_authenticate_complete(sw);
 		} else {
 			ret = nvm_authenticate_device(sw);
 		}
-		pm_runtime_mark_last_busy(&sw->dev);
-		pm_runtime_put_autosuspend(&sw->dev);
 	}
 
 exit_unlock:
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
+exit_rpm:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
 
 	if (ret)
 		return ret;
@@ -968,8 +1288,8 @@
 	struct tb_switch *sw = tb_to_switch(dev);
 	int ret;
 
-	if (mutex_lock_interruptible(&switch_lock))
-		return -ERESTARTSYS;
+	if (!mutex_trylock(&sw->tb->lock))
+		return restart_syscall();
 
 	if (sw->safe_mode)
 		ret = -ENODATA;
@@ -978,7 +1298,7 @@
 	else
 		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
 
-	mutex_unlock(&switch_lock);
+	mutex_unlock(&sw->tb->lock);
 
 	return ret;
 }
@@ -1031,14 +1351,29 @@
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct tb_switch *sw = tb_to_switch(dev);
 
-	if (attr == &dev_attr_key.attr) {
+	if (attr == &dev_attr_device.attr) {
+		if (!sw->device)
+			return 0;
+	} else if (attr == &dev_attr_device_name.attr) {
+		if (!sw->device_name)
+			return 0;
+	} else if (attr == &dev_attr_vendor.attr)  {
+		if (!sw->vendor)
+			return 0;
+	} else if (attr == &dev_attr_vendor_name.attr)  {
+		if (!sw->vendor_name)
+			return 0;
+	} else if (attr == &dev_attr_key.attr) {
 		if (tb_route(sw) &&
 		    sw->tb->security_level == TB_SECURITY_SECURE &&
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
-	} else if (attr == &dev_attr_nvm_authenticate.attr ||
-		   attr == &dev_attr_nvm_version.attr) {
+	} else if (attr == &dev_attr_nvm_authenticate.attr) {
+		if (sw->dma_port && !sw->no_nvm_upgrade)
+			return attr->mode;
+		return 0;
+	} else if (attr == &dev_attr_nvm_version.attr) {
 		if (sw->dma_port)
 			return attr->mode;
 		return 0;
@@ -1064,9 +1399,17 @@
 static void tb_switch_release(struct device *dev)
 {
 	struct tb_switch *sw = tb_to_switch(dev);
+	int i;
 
 	dma_port_free(sw->dma_port);
 
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (!sw->ports[i].disabled) {
+			ida_destroy(&sw->ports[i].in_hopids);
+			ida_destroy(&sw->ports[i].out_hopids);
+		}
+	}
+
 	kfree(sw->uuid);
 	kfree(sw->device_name);
 	kfree(sw->vendor_name);
@@ -1082,11 +1425,22 @@
  */
 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
 {
+	struct tb_switch *sw = tb_to_switch(dev);
+	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
+
+	if (cm_ops->runtime_suspend_switch)
+		return cm_ops->runtime_suspend_switch(sw);
+
 	return 0;
 }
 
 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
 {
+	struct tb_switch *sw = tb_to_switch(dev);
+	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
+
+	if (cm_ops->runtime_resume_switch)
+		return cm_ops->runtime_resume_switch(sw);
 	return 0;
 }
 
@@ -1127,6 +1481,8 @@
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
 		return 3;
 
 	default:
@@ -1151,41 +1507,51 @@
  * separately. The returned switch should be released by calling
  * tb_switch_put().
  *
- * Return: Pointer to the allocated switch or %NULL in case of failure
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of
+ * failure.
  */
 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 				  u64 route)
 {
-	int i;
-	int cap;
 	struct tb_switch *sw;
-	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
+	int upstream_port;
+	int i, ret, depth;
+
+	/* Make sure we do not exceed maximum topology limit */
+	depth = tb_route_length(route);
+	if (depth > TB_SWITCH_MAX_DEPTH)
+		return ERR_PTR(-EADDRNOTAVAIL);
+
+	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
 	if (upstream_port < 0)
-		return NULL;
+		return ERR_PTR(upstream_port);
 
 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
 	if (!sw)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	sw->tb = tb;
-	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
+	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
+	if (ret)
 		goto err_free_sw_ports;
 
-	tb_info(tb, "current switch config:\n");
+	tb_dbg(tb, "current switch config:\n");
 	tb_dump_switch(tb, &sw->config);
 
 	/* configure switch */
 	sw->config.upstream_port_number = upstream_port;
-	sw->config.depth = tb_route_length(route);
-	sw->config.route_lo = route;
-	sw->config.route_hi = route >> 32;
+	sw->config.depth = depth;
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
 	sw->config.enabled = 0;
 
 	/* initialize ports */
 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
 				GFP_KERNEL);
-	if (!sw->ports)
+	if (!sw->ports) {
+		ret = -ENOMEM;
 		goto err_free_sw_ports;
+	}
 
 	for (i = 0; i <= sw->config.max_port_number; i++) {
 		/* minimum setup for tb_find_cap and tb_drom_read to work */
@@ -1195,12 +1561,16 @@
 
 	sw->generation = tb_switch_get_generation(sw);
 
-	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
-	if (cap < 0) {
+	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+	if (ret < 0) {
 		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
 		goto err_free_sw_ports;
 	}
-	sw->cap_plug_events = cap;
+	sw->cap_plug_events = ret;
+
+	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+	if (ret > 0)
+		sw->cap_lc = ret;
 
 	/* Root switch is always authorized */
 	if (!route)
@@ -1219,7 +1589,7 @@
 	kfree(sw->ports);
 	kfree(sw);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 /**
@@ -1234,7 +1604,7 @@
  *
  * The returned switch must be released by calling tb_switch_put().
  *
- * Return: Pointer to the allocated switch or %NULL in case of failure
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
  */
 struct tb_switch *
 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
@@ -1243,7 +1613,7 @@
 
 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
 	if (!sw)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	sw->tb = tb;
 	sw->config.depth = tb_route_length(route);
@@ -1278,9 +1648,8 @@
 	int ret;
 
 	route = tb_route(sw);
-	tb_info(tb,
-		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
-		route, tb_route_length(route), sw->config.upstream_port_number);
+	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
+	       route, tb_route_length(route), sw->config.upstream_port_number);
 
 	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
 		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@@ -1293,25 +1662,27 @@
 	if (ret)
 		return ret;
 
+	ret = tb_lc_configure_link(sw);
+	if (ret)
+		return ret;
+
 	return tb_plug_events_active(sw, true);
 }
 
-static void tb_switch_set_uuid(struct tb_switch *sw)
+static int tb_switch_set_uuid(struct tb_switch *sw)
 {
 	u32 uuid[4];
-	int cap;
+	int ret;
 
 	if (sw->uuid)
-		return;
+		return 0;
 
 	/*
 	 * The newer controllers include fused UUID as part of link
 	 * controller specific registers
 	 */
-	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
-	if (cap > 0) {
-		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
-	} else {
+	ret = tb_lc_read_uuid(sw, uuid);
+	if (ret) {
 		/*
 		 * ICM generates UUID based on UID and fills the upper
 		 * two words with ones. This is not strictly following
@@ -1325,6 +1696,9 @@
 	}
 
 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+	if (!sw->uuid)
+		return -ENOMEM;
+	return 0;
 }
 
 static int tb_switch_add_dma_port(struct tb_switch *sw)
@@ -1333,13 +1707,16 @@
 	int ret;
 
 	switch (sw->generation) {
-	case 3:
-		break;
-
 	case 2:
 		/* Only root switch can be upgraded */
 		if (tb_route(sw))
 			return 0;
+
+		/* fallthrough */
+	case 3:
+		ret = tb_switch_set_uuid(sw);
+		if (ret)
+			return ret;
 		break;
 
 	default:
@@ -1352,13 +1729,30 @@
 		break;
 	}
 
-	if (sw->no_nvm_upgrade)
+	/* Root switch DMA port requires running firmware */
+	if (!tb_route(sw) && sw->config.enabled)
 		return 0;
 
 	sw->dma_port = dma_port_alloc(sw);
 	if (!sw->dma_port)
 		return 0;
 
+	if (sw->no_nvm_upgrade)
+		return 0;
+
+	/*
+	 * If a status is already set then authentication failed when
+	 * dma_port_flash_update_auth() returned. Power cycling is not
+	 * needed (it was done already) so the only thing to do here is
+	 * to unblock runtime PM of the root port.
+	 */
+	nvm_get_auth_status(sw, &status);
+	if (status) {
+		if (!tb_route(sw))
+			nvm_authenticate_complete(sw);
+		return 0;
+	}
+
 	/*
 	 * Check status of the previous flash authentication. If there
 	 * is one we need to power cycle the switch in any case to make
@@ -1374,7 +1768,6 @@
 
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
-		tb_switch_set_uuid(sw);
 		nvm_set_auth_status(sw, status);
 	}
 
@@ -1422,13 +1815,15 @@
 			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
 			return ret;
 		}
-		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
 
-		tb_switch_set_uuid(sw);
+		ret = tb_switch_set_uuid(sw);
+		if (ret)
+			return ret;
 
 		for (i = 0; i <= sw->config.max_port_number; i++) {
 			if (sw->ports[i].disabled) {
-				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
 				continue;
 			}
 			ret = tb_init_port(&sw->ports[i]);
@@ -1441,6 +1836,14 @@
 	if (ret)
 		return ret;
 
+	if (tb_route(sw)) {
+		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
+			 sw->vendor, sw->device);
+		if (sw->vendor_name && sw->device_name)
+			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
+				 sw->device_name);
+	}
+
 	ret = tb_switch_nvm_add(sw);
 	if (ret) {
 		device_del(&sw->dev);
@@ -1478,20 +1881,23 @@
 
 	/* port 0 is the switch itself and never has a remote */
 	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&sw->ports[i]))
-			continue;
-		if (sw->ports[i].remote)
+		if (tb_port_has_remote(&sw->ports[i])) {
 			tb_switch_remove(sw->ports[i].remote->sw);
-		sw->ports[i].remote = NULL;
-		if (sw->ports[i].xdomain)
+			sw->ports[i].remote = NULL;
+		} else if (sw->ports[i].xdomain) {
 			tb_xdomain_remove(sw->ports[i].xdomain);
-		sw->ports[i].xdomain = NULL;
+			sw->ports[i].xdomain = NULL;
+		}
 	}
 
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
+	tb_lc_unconfigure_link(sw);
 
 	tb_switch_nvm_remove(sw);
+
+	if (tb_route(sw))
+		dev_info(&sw->dev, "device disconnected\n");
 	device_unregister(&sw->dev);
 }
 
@@ -1511,15 +1917,17 @@
 	}
 	sw->is_unplugged = true;
 	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+		if (tb_port_has_remote(&sw->ports[i]))
 			tb_sw_set_unplugged(sw->ports[i].remote->sw);
+		else if (sw->ports[i].xdomain)
+			sw->ports[i].xdomain->is_unplugged = true;
 	}
 }
 
 int tb_switch_resume(struct tb_switch *sw)
 {
 	int i, err;
-	tb_sw_info(sw, "resuming switch\n");
+	tb_sw_dbg(sw, "resuming switch\n");
 
 	/*
 	 * Check for UID of the connected switches except for root
@@ -1528,6 +1936,17 @@
 	if (tb_route(sw)) {
 		u64 uid;
 
+		/*
+		 * Check first that we can still read the switch config
+		 * space. It may be that there is now another domain
+		 * connected.
+		 */
+		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
+		if (err < 0) {
+			tb_sw_info(sw, "switch not present anymore\n");
+			return err;
+		}
+
 		err = tb_drom_read_uid_only(sw, &uid);
 		if (err) {
 			tb_sw_warn(sw, "uid read failed\n");
@@ -1546,6 +1965,10 @@
 	if (err)
 		return err;
 
+	err = tb_lc_configure_link(sw);
+	if (err)
+		return err;
+
 	err = tb_plug_events_active(sw, true);
 	if (err)
 		return err;
@@ -1553,15 +1976,23 @@
 	/* check for surviving downstream switches */
 	for (i = 1; i <= sw->config.max_port_number; i++) {
 		struct tb_port *port = &sw->ports[i];
-		if (tb_is_upstream_port(port))
+
+		if (!tb_port_has_remote(port) && !port->xdomain)
 			continue;
-		if (!port->remote)
-			continue;
-		if (tb_wait_for_port(port, true) <= 0
-			|| tb_switch_resume(port->remote->sw)) {
+
+		if (tb_wait_for_port(port, true) <= 0) {
 			tb_port_warn(port,
 				     "lost during suspend, disconnecting\n");
-			tb_sw_set_unplugged(port->remote->sw);
+			if (tb_port_has_remote(port))
+				tb_sw_set_unplugged(port->remote->sw);
+			else if (port->xdomain)
+				port->xdomain->is_unplugged = true;
+		} else if (tb_port_has_remote(port)) {
+			if (tb_switch_resume(port->remote->sw)) {
+				tb_port_warn(port,
+					     "lost during suspend, disconnecting\n");
+				tb_sw_set_unplugged(port->remote->sw);
+			}
 		}
 	}
 	return 0;
@@ -1575,13 +2006,11 @@
 		return;
 
 	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+		if (tb_port_has_remote(&sw->ports[i]))
 			tb_switch_suspend(sw->ports[i].remote->sw);
 	}
-	/*
-	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
-	 * effect?
-	 */
+
+	tb_lc_set_sleep(sw);
 }
 
 struct tb_sw_lookup {
@@ -1592,10 +2021,10 @@
 	u64 route;
 };
 
-static int tb_switch_match(struct device *dev, void *data)
+static int tb_switch_match(struct device *dev, const void *data)
 {
 	struct tb_switch *sw = tb_to_switch(dev);
-	struct tb_sw_lookup *lookup = data;
+	const struct tb_sw_lookup *lookup = data;
 
 	if (!sw)
 		return 0;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1424581..1f7a9e1 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
  */
 
 #include <linux/slab.h>
@@ -12,7 +13,7 @@
 
 #include "tb.h"
 #include "tb_regs.h"
-#include "tunnel_pci.h"
+#include "tunnel.h"
 
 /**
  * struct tb_cm - Simple Thunderbolt connection manager
@@ -27,8 +28,100 @@
 	bool hotplug_active;
 };
 
+struct tb_hotplug_event {
+	struct work_struct work;
+	struct tb *tb;
+	u64 route;
+	u8 port;
+	bool unplug;
+};
+
+static void tb_handle_hotplug(struct work_struct *work);
+
+static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
+{
+	struct tb_hotplug_event *ev;
+
+	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return;
+
+	ev->tb = tb;
+	ev->route = route;
+	ev->port = port;
+	ev->unplug = unplug;
+	INIT_WORK(&ev->work, tb_handle_hotplug);
+	queue_work(tb->wq, &ev->work);
+}
+
 /* enumeration & hot plug handling */
 
+static void tb_discover_tunnels(struct tb_switch *sw)
+{
+	struct tb *tb = sw->tb;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *port;
+	int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_tunnel *tunnel = NULL;
+
+		port = &sw->ports[i];
+		switch (port->config.type) {
+		case TB_TYPE_DP_HDMI_IN:
+			tunnel = tb_tunnel_discover_dp(tb, port);
+			break;
+
+		case TB_TYPE_PCIE_DOWN:
+			tunnel = tb_tunnel_discover_pci(tb, port);
+			break;
+
+		default:
+			break;
+		}
+
+		if (!tunnel)
+			continue;
+
+		if (tb_tunnel_is_pci(tunnel)) {
+			struct tb_switch *parent = tunnel->dst_port->sw;
+
+			while (parent != tunnel->src_port->sw) {
+				parent->boot = true;
+				parent = tb_switch_parent(parent);
+			}
+		}
+
+		list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	}
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (tb_port_has_remote(&sw->ports[i]))
+			tb_discover_tunnels(sw->ports[i].remote->sw);
+	}
+}
+
+static void tb_scan_xdomain(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	struct tb_xdomain *xd;
+	u64 route;
+
+	route = tb_downstream_route(port);
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		tb_xdomain_put(xd);
+		return;
+	}
+
+	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
+			      NULL);
+	if (xd) {
+		tb_port_at(route, sw)->xdomain = xd;
+		tb_xdomain_add(xd);
+	}
+}
 
 static void tb_scan_port(struct tb_port *port);
 
@@ -47,9 +140,21 @@
  */
 static void tb_scan_port(struct tb_port *port)
 {
+	struct tb_cm *tcm = tb_priv(port->sw->tb);
+	struct tb_port *upstream_port;
 	struct tb_switch *sw;
+
 	if (tb_is_upstream_port(port))
 		return;
+
+	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
+	    !tb_dp_port_is_enabled(port)) {
+		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
+		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
+				 false);
+		return;
+	}
+
 	if (port->config.type != TB_TYPE_PORT)
 		return;
 	if (port->dual_link_port && port->link_nr)
@@ -60,45 +165,95 @@
 	if (tb_wait_for_port(port, false) <= 0)
 		return;
 	if (port->remote) {
-		tb_port_WARN(port, "port already has a remote!\n");
+		tb_port_dbg(port, "port already has a remote\n");
 		return;
 	}
 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
 			     tb_downstream_route(port));
-	if (!sw)
+	if (IS_ERR(sw)) {
+		/*
+		 * If there is an error accessing the connected switch
+		 * it may be connected to another domain. We also allow
+		 * the other domain to be connected to a switch at the
+		 * maximum topology depth.
+		 */
+		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+			tb_scan_xdomain(port);
 		return;
+	}
 
 	if (tb_switch_configure(sw)) {
 		tb_switch_put(sw);
 		return;
 	}
 
-	sw->authorized = true;
+	/*
+	 * If there was previously another domain connected, remove it
+	 * first.
+	 */
+	if (port->xdomain) {
+		tb_xdomain_remove(port->xdomain);
+		port->xdomain = NULL;
+	}
+
+	/*
+	 * Do not send uevents until we have discovered all existing
+	 * tunnels and know which switches were authorized already by
+	 * the boot firmware.
+	 */
+	if (!tcm->hotplug_active)
+		dev_set_uevent_suppress(&sw->dev, true);
 
 	if (tb_switch_add(sw)) {
 		tb_switch_put(sw);
 		return;
 	}
 
-	port->remote = tb_upstream_port(sw);
-	tb_upstream_port(sw)->remote = port;
+	/* Link the switches using both links if available */
+	upstream_port = tb_upstream_port(sw);
+	port->remote = upstream_port;
+	upstream_port->remote = port;
+	if (port->dual_link_port && upstream_port->dual_link_port) {
+		port->dual_link_port->remote = upstream_port->dual_link_port;
+		upstream_port->dual_link_port->remote = port->dual_link_port;
+	}
+
 	tb_scan_switch(sw);
 }
 
+static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
+			  struct tb_port *src_port, struct tb_port *dst_port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+		if (tunnel->type == type &&
+		    ((src_port && src_port == tunnel->src_port) ||
+		     (dst_port && dst_port == tunnel->dst_port))) {
+			tb_tunnel_deactivate(tunnel);
+			list_del(&tunnel->list);
+			tb_tunnel_free(tunnel);
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
 /**
  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
  */
 static void tb_free_invalid_tunnels(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_pci_tunnel *tunnel;
-	struct tb_pci_tunnel *n;
+	struct tb_tunnel *tunnel;
+	struct tb_tunnel *n;
 
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
-		if (tb_pci_is_invalid(tunnel)) {
-			tb_pci_deactivate(tunnel);
+		if (tb_tunnel_is_invalid(tunnel)) {
+			tb_tunnel_deactivate(tunnel);
 			list_del(&tunnel->list);
-			tb_pci_free(tunnel);
+			tb_tunnel_free(tunnel);
 		}
 	}
 }
@@ -111,137 +266,233 @@
 	int i;
 	for (i = 1; i <= sw->config.max_port_number; i++) {
 		struct tb_port *port = &sw->ports[i];
-		if (tb_is_upstream_port(port))
+
+		if (!tb_port_has_remote(port))
 			continue;
-		if (!port->remote)
-			continue;
+
 		if (port->remote->sw->is_unplugged) {
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
+			if (port->dual_link_port)
+				port->dual_link_port->remote = NULL;
 		} else {
 			tb_free_unplugged_children(port->remote->sw);
 		}
 	}
 }
 
-
 /**
- * find_pci_up_port() - return the first PCIe up port on @sw or NULL
+ * tb_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
  */
-static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+static struct tb_port *tb_find_port(struct tb_switch *sw,
+				    enum tb_port_type type)
 {
 	int i;
 	for (i = 1; i <= sw->config.max_port_number; i++)
-		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+		if (sw->ports[i].config.type == type)
 			return &sw->ports[i];
 	return NULL;
 }
 
 /**
- * find_unused_down_port() - return the first inactive PCIe down port on @sw
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
  */
-static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+					   enum tb_port_type type)
 {
 	int i;
-	int cap;
-	int res;
-	int data;
+
 	for (i = 1; i <= sw->config.max_port_number; i++) {
 		if (tb_is_upstream_port(&sw->ports[i]))
 			continue;
-		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
+		if (sw->ports[i].config.type != type)
 			continue;
-		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
-		if (cap < 0)
+		if (!sw->ports[i].cap_adap)
 			continue;
-		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
-		if (res < 0)
-			continue;
-		if (data & 0x80000000)
+		if (tb_port_is_enabled(&sw->ports[i]))
 			continue;
 		return &sw->ports[i];
 	}
 	return NULL;
 }
 
-/**
- * tb_activate_pcie_devices() - scan for and activate PCIe devices
- *
- * This method is somewhat ad hoc. For now it only supports one device
- * per port and only devices at depth 1.
- */
-static void tb_activate_pcie_devices(struct tb *tb)
+static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
+					 const struct tb_port *port)
 {
-	int i;
-	int cap;
-	u32 data;
-	struct tb_switch *sw;
-	struct tb_port *up_port;
-	struct tb_port *down_port;
-	struct tb_pci_tunnel *tunnel;
-	struct tb_cm *tcm = tb_priv(tb);
+	/*
+	 * To keep plugging devices consistently in the same PCIe
+	 * hierarchy, do mapping here for root switch downstream PCIe
+	 * ports.
+	 */
+	if (!tb_route(sw)) {
+		int phy_port = tb_phy_port_from_link(port->port);
+		int index;
 
-	/* scan for pcie devices at depth 1*/
-	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
-			continue;
-		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
-			continue;
-		if (!tb->root_switch->ports[i].remote)
-			continue;
-		sw = tb->root_switch->ports[i].remote->sw;
-		up_port = tb_find_pci_up_port(sw);
-		if (!up_port) {
-			tb_sw_info(sw, "no PCIe devices found, aborting\n");
-			continue;
-		}
+		/*
+		 * Hard-coded Thunderbolt port to PCIe down port mapping
+		 * per controller.
+		 */
+		if (tb_switch_is_cr(sw))
+			index = !phy_port ? 6 : 7;
+		else if (tb_switch_is_fr(sw))
+			index = !phy_port ? 6 : 8;
+		else
+			goto out;
 
-		/* check whether port is already activated */
-		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
-		if (cap < 0)
-			continue;
-		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
-			continue;
-		if (data & 0x80000000) {
-			tb_port_info(up_port,
-				     "PCIe port already activated, aborting\n");
-			continue;
-		}
+		/* Validate the hard-coding */
+		if (WARN_ON(index > sw->config.max_port_number))
+			goto out;
+		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+			goto out;
+		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+			goto out;
 
-		down_port = tb_find_unused_down_port(tb->root_switch);
-		if (!down_port) {
-			tb_port_info(up_port,
-				     "All PCIe down ports are occupied, aborting\n");
-			continue;
-		}
-		tunnel = tb_pci_alloc(tb, up_port, down_port);
-		if (!tunnel) {
-			tb_port_info(up_port,
-				     "PCIe tunnel allocation failed, aborting\n");
-			continue;
-		}
-
-		if (tb_pci_activate(tunnel)) {
-			tb_port_info(up_port,
-				     "PCIe tunnel activation failed, aborting\n");
-			tb_pci_free(tunnel);
-			continue;
-		}
-
-		list_add(&tunnel->list, &tcm->tunnel_list);
+		return &sw->ports[index];
 	}
+
+out:
+	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
+}
+
+static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_switch *sw = out->sw;
+	struct tb_tunnel *tunnel;
+	struct tb_port *in;
+
+	if (tb_port_is_enabled(out))
+		return 0;
+
+	do {
+		sw = tb_to_switch(sw->dev.parent);
+		if (!sw)
+			return 0;
+		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
+	} while (!in);
+
+	tunnel = tb_tunnel_alloc_dp(tb, in, out);
+	if (!tunnel) {
+		tb_port_dbg(out, "DP tunnel allocation failed\n");
+		return -ENOMEM;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(out, "DP tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	return 0;
+}
+
+static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+{
+	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+}
+
+static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_port *up, *down, *port;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_switch *parent_sw;
+	struct tb_tunnel *tunnel;
+
+	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+	if (!up)
+		return 0;
+
+	/*
+	 * Look up available down port. Since we are chaining, it should
+	 * be found right above this switch.
+	 */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	port = tb_port_at(tb_route(sw), parent_sw);
+	down = tb_find_pcie_down(parent_sw, port);
+	if (!down)
+		return 0;
+
+	tunnel = tb_tunnel_alloc_pci(tb, up, down);
+	if (!tunnel)
+		return -ENOMEM;
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(up,
+			     "PCIe tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	return 0;
+}
+
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *nhi_port, *dst_port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+
+	mutex_lock(&tb->lock);
+	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
+				     xd->transmit_path, xd->receive_ring,
+				     xd->receive_path);
+	if (!tunnel) {
+		mutex_unlock(&tb->lock);
+		return -ENOMEM;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(nhi_port,
+			     "DMA tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		mutex_unlock(&tb->lock);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	mutex_unlock(&tb->lock);
+	return 0;
+}
+
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_port *dst_port;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+
+	/*
+	 * It is possible that the tunnel was already torn down (in
+	 * case of cable disconnect) so it is fine if we cannot find it
+	 * here anymore.
+	 */
+	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+}
+
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!xd->is_unplugged) {
+		mutex_lock(&tb->lock);
+		__tb_disconnect_xdomain_paths(tb, xd);
+		mutex_unlock(&tb->lock);
+	}
+	return 0;
 }
 
 /* hotplug handling */
 
-struct tb_hotplug_event {
-	struct work_struct work;
-	struct tb *tb;
-	u64 route;
-	u8 port;
-	bool unplug;
-};
-
 /**
  * tb_handle_hotplug() - handle hotplug event
  *
@@ -258,7 +509,7 @@
 	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
 
-	sw = get_switch_at_route(tb->root_switch, ev->route);
+	sw = tb_switch_find_by_route(tb, ev->route);
 	if (!sw) {
 		tb_warn(tb,
 			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
@@ -269,43 +520,60 @@
 		tb_warn(tb,
 			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
 			ev->route, ev->port, ev->unplug);
-		goto out;
+		goto put_sw;
 	}
 	port = &sw->ports[ev->port];
 	if (tb_is_upstream_port(port)) {
-		tb_warn(tb,
-			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
-			ev->route, ev->port, ev->unplug);
-		goto out;
+		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+		       ev->route, ev->port, ev->unplug);
+		goto put_sw;
 	}
 	if (ev->unplug) {
-		if (port->remote) {
-			tb_port_info(port, "unplugged\n");
+		if (tb_port_has_remote(port)) {
+			tb_port_dbg(port, "switch unplugged\n");
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
+			if (port->dual_link_port)
+				port->dual_link_port->remote = NULL;
+		} else if (port->xdomain) {
+			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
+
+			tb_port_dbg(port, "xdomain unplugged\n");
+			/*
+			 * Service drivers are unbound during
+			 * tb_xdomain_remove() so setting XDomain as
+			 * unplugged here prevents deadlock if they call
+			 * tb_xdomain_disable_paths(). We will tear down
+			 * the path below.
+			 */
+			xd->is_unplugged = true;
+			tb_xdomain_remove(xd);
+			port->xdomain = NULL;
+			__tb_disconnect_xdomain_paths(tb, xd);
+			tb_xdomain_put(xd);
+		} else if (tb_port_is_dpout(port)) {
+			tb_teardown_dp(tb, port);
 		} else {
-			tb_port_info(port,
-				     "got unplug event for disconnected port, ignoring\n");
+			tb_port_dbg(port,
+				   "got unplug event for disconnected port, ignoring\n");
 		}
 	} else if (port->remote) {
-		tb_port_info(port,
-			     "got plug event for connected port, ignoring\n");
+		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
 	} else {
-		tb_port_info(port, "hotplug: scanning\n");
-		tb_scan_port(port);
-		if (!port->remote) {
-			tb_port_info(port, "hotplug: no switch found\n");
-		} else if (port->remote->sw->config.depth > 1) {
-			tb_sw_warn(port->remote->sw,
-				   "hotplug: chaining not supported\n");
-		} else {
-			tb_sw_info(port->remote->sw,
-				   "hotplug: activating pcie devices\n");
-			tb_activate_pcie_devices(tb);
+		if (tb_port_is_null(port)) {
+			tb_port_dbg(port, "hotplug: scanning\n");
+			tb_scan_port(port);
+			if (!port->remote)
+				tb_port_dbg(port, "hotplug: no switch found\n");
+		} else if (tb_port_is_dpout(port)) {
+			tb_tunnel_dp(tb, port);
 		}
 	}
+
+put_sw:
+	tb_switch_put(sw);
 out:
 	mutex_unlock(&tb->lock);
 	kfree(ev);
@@ -320,7 +588,6 @@
 			    const void *buf, size_t size)
 {
 	const struct cfg_event_pkg *pkg = buf;
-	struct tb_hotplug_event *ev;
 	u64 route;
 
 	if (type != TB_CFG_PKG_EVENT) {
@@ -336,40 +603,59 @@
 			pkg->port);
 	}
 
-	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
-	if (!ev)
-		return;
-	INIT_WORK(&ev->work, tb_handle_hotplug);
-	ev->tb = tb;
-	ev->route = route;
-	ev->port = pkg->port;
-	ev->unplug = pkg->unplug;
-	queue_work(tb->wq, &ev->work);
+	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
 }
 
 static void tb_stop(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_pci_tunnel *tunnel;
-	struct tb_pci_tunnel *n;
+	struct tb_tunnel *tunnel;
+	struct tb_tunnel *n;
 
 	/* tunnels are only present after everything has been initialized */
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
-		tb_pci_deactivate(tunnel);
-		tb_pci_free(tunnel);
+		/*
+		 * DMA tunnels require the driver to be functional so we
+		 * tear them down. Other protocol tunnels can be left
+		 * intact.
+		 */
+		if (tb_tunnel_is_dma(tunnel))
+			tb_tunnel_deactivate(tunnel);
+		tb_tunnel_free(tunnel);
 	}
 	tb_switch_remove(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 }
 
+static int tb_scan_finalize_switch(struct device *dev, void *data)
+{
+	if (tb_is_switch(dev)) {
+		struct tb_switch *sw = tb_to_switch(dev);
+
+		/*
+		 * If we found that the switch was already setup by the
+		 * boot firmware, mark it as authorized now before we
+		 * send uevent to userspace.
+		 */
+		if (sw->boot)
+			sw->authorized = 1;
+
+		dev_set_uevent_suppress(dev, false);
+		kobject_uevent(&dev->kobj, KOBJ_ADD);
+		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
+	}
+
+	return 0;
+}
+
 static int tb_start(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
 	int ret;
 
 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
-	if (!tb->root_switch)
-		return -ENOMEM;
+	if (IS_ERR(tb->root_switch))
+		return PTR_ERR(tb->root_switch);
 
 	/*
 	 * ICM firmware upgrade needs running firmware and in native
@@ -393,7 +679,11 @@
 
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
-	tb_activate_pcie_devices(tb);
+	/* Find out tunnels created by the boot firmware */
+	tb_discover_tunnels(tb->root_switch);
+	/* Make the discovered switches available to the userspace */
+	device_for_each_child(&tb->root_switch->dev, NULL,
+			      tb_scan_finalize_switch);
 
 	/* Allow tb_handle_hotplug to progress events */
 	tcm->hotplug_active = true;
@@ -404,10 +694,10 @@
 {
 	struct tb_cm *tcm = tb_priv(tb);
 
-	tb_info(tb, "suspending...\n");
+	tb_dbg(tb, "suspending...\n");
 	tb_switch_suspend(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-	tb_info(tb, "suspend finished\n");
+	tb_dbg(tb, "suspend finished\n");
 
 	return 0;
 }
@@ -415,9 +705,9 @@
 static int tb_resume_noirq(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
-	struct tb_pci_tunnel *tunnel, *n;
+	struct tb_tunnel *tunnel, *n;
 
-	tb_info(tb, "resuming...\n");
+	tb_dbg(tb, "resuming...\n");
 
 	/* remove any pci devices the firmware might have setup */
 	tb_switch_reset(tb, 0);
@@ -426,28 +716,66 @@
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
-		tb_pci_restart(tunnel);
+		tb_tunnel_restart(tunnel);
 	if (!list_empty(&tcm->tunnel_list)) {
 		/*
 		 * the pcie links need some time to get going.
 		 * 100ms works for me...
 		 */
-		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
+		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
 		msleep(100);
 	}
 	 /* Allow tb_handle_hotplug to progress events */
 	tcm->hotplug_active = true;
-	tb_info(tb, "resume finished\n");
+	tb_dbg(tb, "resume finished\n");
 
 	return 0;
 }
 
+static int tb_free_unplugged_xdomains(struct tb_switch *sw)
+{
+	int i, ret = 0;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->xdomain && port->xdomain->is_unplugged) {
+			tb_xdomain_remove(port->xdomain);
+			port->xdomain = NULL;
+			ret++;
+		} else if (port->remote) {
+			ret += tb_free_unplugged_xdomains(port->remote->sw);
+		}
+	}
+
+	return ret;
+}
+
+static void tb_complete(struct tb *tb)
+{
+	/*
+	 * Release any unplugged XDomains. If another domain was swapped
+	 * in place of an unplugged XDomain we need to run another
+	 * rescan.
+	 */
+	mutex_lock(&tb->lock);
+	if (tb_free_unplugged_xdomains(tb->root_switch))
+		tb_scan_switch(tb->root_switch);
+	mutex_unlock(&tb->lock);
+}
+
 static const struct tb_cm_ops tb_cm_ops = {
 	.start = tb_start,
 	.stop = tb_stop,
 	.suspend_noirq = tb_suspend_noirq,
 	.resume_noirq = tb_resume_noirq,
+	.complete = tb_complete,
 	.handle_event = tb_handle_event,
+	.approve_switch = tb_tunnel_pci,
+	.approve_xdomain_paths = tb_approve_xdomain_paths,
+	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
 };
 
 struct tb *tb_probe(struct tb_nhi *nhi)
@@ -462,7 +790,7 @@
 	if (!tb)
 		return NULL;
 
-	tb->security_level = TB_SECURITY_NONE;
+	tb->security_level = TB_SECURITY_USER;
 	tb->cm_ops = &tb_cm_ops;
 
 	tcm = tb_priv(tb);
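Note: the kmalloc/INIT_WORK/queue_work sequence deleted from tb_handle_event() above is assumed to have moved into a shared tb_queue_hotplug() helper whose definition is outside this hunk. A sketch consistent with the removed lines (placement and exact signature are assumptions):

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	/* Defer handling to the domain workqueue, as before */
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	queue_work(tb->wq, &ev->work);
}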
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5067d69..6407d52 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #ifndef TB_H_
@@ -42,6 +43,7 @@
 };
 
 #define TB_SWITCH_KEY_SIZE		32
+#define TB_SWITCH_MAX_DEPTH		6
 
 /**
  * struct tb_switch - a thunderbolt switch
@@ -61,6 +63,7 @@
  * @device_name: Name of the device (or %NULL if not known)
  * @generation: Switch Thunderbolt generation
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_lc: Offset to the link controller capability (%0 if not found)
  * @is_unplugged: The switch is going away
  * @drom: DROM of the switch (%NULL if not found)
  * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -69,7 +72,6 @@
  * @boot: Whether the switch was already authorized on boot or not
  * @rpm: The switch supports runtime PM
  * @authorized: Whether the switch is authorized by user or policy
- * @work: Work used to automatically authorize a switch
  * @security_level: Switch supported security level
  * @key: Contains the key used to challenge the device or %NULL if not
  *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
@@ -77,10 +79,11 @@
  * @connection_key: Connection key used with ICM messaging
  * @link: Root switch link this switch is connected (ICM only)
  * @depth: Depth in the chain this switch is connected (ICM only)
+ * @rpm_complete: Completion used to wait for runtime resume to
+ *		  complete (ICM only)
  *
  * When the switch is being added or removed to the domain (other
- * switches) you need to have domain lock held. For switch authorization
- * internal switch_lock is enough.
+ * switches) you need to have domain lock held.
  */
 struct tb_switch {
 	struct device dev;
@@ -96,6 +99,7 @@
 	const char *device_name;
 	unsigned int generation;
 	int cap_plug_events;
+	int cap_lc;
 	bool is_unplugged;
 	u8 *drom;
 	struct tb_switch_nvm *nvm;
@@ -104,13 +108,13 @@
 	bool boot;
 	bool rpm;
 	unsigned int authorized;
-	struct work_struct work;
 	enum tb_security_level security_level;
 	u8 *key;
 	u8 connection_id;
 	u8 connection_key;
 	u8 link;
 	u8 depth;
+	struct completion rpm_complete;
 };
 
 /**
@@ -120,11 +124,14 @@
  * @remote: Remote port (%NULL if not connected)
  * @xdomain: Remote host (%NULL if not connected)
  * @cap_phy: Offset, zero if not found
+ * @cap_adap: Offset of the adapter specific capability (%0 if not present)
  * @port: Port number on switch
  * @disabled: Disabled by eeprom
  * @dual_link_port: If the switch is connected using two ports, points
  *		    to the other port.
  * @link_nr: Is this primary or secondary port on the dual_link.
+ * @in_hopids: Currently allocated input HopIDs
+ * @out_hopids: Currently allocated output HopIDs
  */
 struct tb_port {
 	struct tb_regs_port_header config;
@@ -132,19 +139,35 @@
 	struct tb_port *remote;
 	struct tb_xdomain *xdomain;
 	int cap_phy;
+	int cap_adap;
 	u8 port;
 	bool disabled;
 	struct tb_port *dual_link_port;
 	u8 link_nr:1;
+	struct ida in_hopids;
+	struct ida out_hopids;
 };
 
 /**
  * struct tb_path_hop - routing information for a tb_path
+ * @in_port: Ingress port of a switch
+ * @out_port: Egress port of a switch where the packet is routed out
+ *	      (must be on the same switch as @in_port)
+ * @in_hop_index: HopID where the path configuration entry is placed in
+ *		  the path config space of @in_port.
+ * @in_counter_index: Used counter index (not used in the driver
+ *		      currently, %-1 to disable)
+ * @next_hop_index: HopID of the packet when it is routed out from @out_port
+ * @initial_credits: Number of initial flow control credits allocated for
+ *		     the path
  *
  * Hop configuration is always done on the IN port of a switch.
  * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed out through out_port. The
- * next hop to take (on out_port->remote) is determined by next_hop_index.
+ * next hop to take (on out_port->remote) is determined by
+ * next_hop_index. When routing a packet to another switch (out->remote is
+ * set) the @next_hop_index must match the @in_hop_index of that next
+ * hop to make routing possible.
  *
  * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
  * port.
@@ -153,44 +176,71 @@
 	struct tb_port *in_port;
 	struct tb_port *out_port;
 	int in_hop_index;
-	int in_counter_index; /* write -1 to disable counters for this hop. */
+	int in_counter_index;
 	int next_hop_index;
+	unsigned int initial_credits;
 };
 
 /**
  * enum tb_path_port - path options mask
+ * @TB_PATH_NONE: Do not activate on any hop on path
+ * @TB_PATH_SOURCE: Activate on the first hop (out of src)
+ * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
+ * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
+ * @TB_PATH_ALL: Activate on all hops on the path
  */
 enum tb_path_port {
 	TB_PATH_NONE = 0,
-	TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
-	TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
-	TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
+	TB_PATH_SOURCE = 1,
+	TB_PATH_INTERNAL = 2,
+	TB_PATH_DESTINATION = 4,
 	TB_PATH_ALL = 7,
 };
 
 /**
  * struct tb_path - a unidirectional path between two ports
+ * @tb: Pointer to the domain structure
+ * @name: Name of the path (used for debugging)
+ * @nfc_credits: Number of non flow controlled credits allocated for the path
+ * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
+ * @egress_shared_buffer: Shared buffering used for egress ports on the path
+ * @ingress_fc_enable: Flow control for ingress ports on the path
+ * @egress_fc_enable: Flow control for egress ports on the path
+ * @priority: Priority group of the path
+ * @weight: Weight of the path inside the priority group
+ * @drop_packages: Drop packages from queue tail or head
+ * @activated: Is the path active
+ * @clear_fc: Clear all flow control from the path config space entries
+ *	      when deactivating this path
+ * @hops: Path hops
+ * @path_length: How many hops the path uses
  *
- * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
- * tunnel two paths have to be created between the two PCIe ports.
- *
+ * A path consists of a number of hops (see &struct tb_path_hop). To
+ * establish a PCIe tunnel two paths have to be created between the two
+ * PCIe ports.
  */
 struct tb_path {
 	struct tb *tb;
-	int nfc_credits; /* non flow controlled credits */
+	const char *name;
+	int nfc_credits;
 	enum tb_path_port ingress_shared_buffer;
 	enum tb_path_port egress_shared_buffer;
 	enum tb_path_port ingress_fc_enable;
 	enum tb_path_port egress_fc_enable;
 
-	int priority:3;
+	unsigned int priority:3;
 	int weight:4;
 	bool drop_packages;
 	bool activated;
+	bool clear_fc;
 	struct tb_path_hop *hops;
-	int path_length; /* number of hops */
+	int path_length;
 };
 
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define TB_PATH_MIN_HOPID	8
+#define TB_PATH_MAX_HOPS	7
+
 /**
  * struct tb_cm_ops - Connection manager specific operations vector
  * @driver_ready: Called right after control channel is started. Used by
@@ -203,6 +253,8 @@
  * @complete: Connection manager specific complete
  * @runtime_suspend: Connection manager specific runtime_suspend
  * @runtime_resume: Connection manager specific runtime_resume
+ * @runtime_suspend_switch: Runtime suspend a switch
+ * @runtime_resume_switch: Runtime resume a switch
  * @handle_event: Handle thunderbolt event
  * @get_boot_acl: Get boot ACL list
  * @set_boot_acl: Set boot ACL list
@@ -223,6 +275,8 @@
 	void (*complete)(struct tb *tb);
 	int (*runtime_suspend)(struct tb *tb);
 	int (*runtime_resume)(struct tb *tb);
+	int (*runtime_suspend_switch)(struct tb_switch *sw);
+	int (*runtime_resume_switch)(struct tb_switch *sw);
 	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
 			     const void *buf, size_t size);
 	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
@@ -260,7 +314,20 @@
 	return &sw->ports[sw->config.upstream_port_number];
 }
 
-static inline u64 tb_route(struct tb_switch *sw)
+/**
+ * tb_is_upstream_port() - Is the port upstream facing
+ * @port: Port to check
+ *
+ * Returns true if @port is an upstream facing port. In case of dual
+ * link, both ports return true.
+ */
+static inline bool tb_is_upstream_port(const struct tb_port *port)
+{
+	const struct tb_port *upstream_port = tb_upstream_port(port->sw);
+	return port == upstream_port || port->dual_link_port == upstream_port;
+}
+
+static inline u64 tb_route(const struct tb_switch *sw)
 {
 	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
 }
@@ -275,9 +342,54 @@
 	return &sw->ports[port];
 }
 
+/**
+ * tb_port_has_remote() - Does the port have a switch connected downstream
+ * @port: Port to check
+ *
+ * Returns true only when the port is the primary port and has a remote set.
+ */
+static inline bool tb_port_has_remote(const struct tb_port *port)
+{
+	if (tb_is_upstream_port(port))
+		return false;
+	if (!port->remote)
+		return false;
+	if (port->dual_link_port && port->link_nr)
+		return false;
+
+	return true;
+}
+
+static inline bool tb_port_is_null(const struct tb_port *port)
+{
+	return port && port->port && port->config.type == TB_TYPE_PORT;
+}
+
+static inline bool tb_port_is_pcie_down(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_PCIE_DOWN;
+}
+
+static inline bool tb_port_is_pcie_up(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_PCIE_UP;
+}
+
+static inline bool tb_port_is_dpin(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
+}
+
+static inline bool tb_port_is_dpout(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
+}
+
 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			     enum tb_cfg_space space, u32 offset, u32 length)
 {
+	if (sw->is_unplugged)
+		return -ENODEV;
 	return tb_cfg_read(sw->tb->ctl,
 			   buffer,
 			   tb_route(sw),
@@ -290,6 +402,8 @@
 static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
 			      enum tb_cfg_space space, u32 offset, u32 length)
 {
+	if (sw->is_unplugged)
+		return -ENODEV;
 	return tb_cfg_write(sw->tb->ctl,
 			    buffer,
 			    tb_route(sw),
@@ -302,6 +416,8 @@
 static inline int tb_port_read(struct tb_port *port, void *buffer,
 			       enum tb_cfg_space space, u32 offset, u32 length)
 {
+	if (port->sw->is_unplugged)
+		return -ENODEV;
 	return tb_cfg_read(port->sw->tb->ctl,
 			   buffer,
 			   tb_route(port->sw),
@@ -314,6 +430,8 @@
 static inline int tb_port_write(struct tb_port *port, const void *buffer,
 				enum tb_cfg_space space, u32 offset, u32 length)
 {
+	if (port->sw->is_unplugged)
+		return -ENODEV;
 	return tb_cfg_write(port->sw->tb->ctl,
 			    buffer,
 			    tb_route(port->sw),
@@ -327,22 +445,22 @@
 #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
 #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
 #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
-
+#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
 
 #define __TB_SW_PRINT(level, sw, fmt, arg...)           \
 	do {                                            \
-		struct tb_switch *__sw = (sw);          \
+		const struct tb_switch *__sw = (sw);    \
 		level(__sw->tb, "%llx: " fmt,           \
 		      tb_route(__sw), ## arg);          \
 	} while (0)
 #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
 #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
 #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
-
+#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
 
 #define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
 	do {                                                            \
-		struct tb_port *__port = (_port);                       \
+		const struct tb_port *__port = (_port);                 \
 		level(__port->sw->tb, "%llx:%x: " fmt,                  \
 		      tb_route(__port->sw), __port->port, ## arg);      \
 	} while (0)
@@ -352,6 +470,8 @@
 	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
 #define tb_port_info(port, fmt, arg...) \
 	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+#define tb_port_dbg(port, fmt, arg...) \
+	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
 
 struct tb *icm_probe(struct tb_nhi *nhi);
 struct tb *tb_probe(struct tb_nhi *nhi);
@@ -382,6 +502,13 @@
 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
 int tb_domain_disconnect_all_paths(struct tb *tb);
 
+static inline struct tb *tb_domain_get(struct tb *tb)
+{
+	if (tb)
+		get_device(&tb->dev);
+	return tb;
+}
+
 static inline void tb_domain_put(struct tb *tb)
 {
 	put_device(&tb->dev);
@@ -398,7 +525,6 @@
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
 void tb_sw_set_unplugged(struct tb_switch *sw);
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
 					       u8 depth);
 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -428,14 +554,74 @@
 	return NULL;
 }
 
+static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
+{
+	return tb_to_switch(sw->dev.parent);
+}
+
+static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+{
+	return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+}
+
+static inline bool tb_switch_is_er(const struct tb_switch *sw)
+{
+	return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+}
+
+static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_in_hopid(struct tb_port *port, int hopid);
+int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_out_hopid(struct tb_port *port, int hopid);
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+				     struct tb_port *prev);
 
 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+bool tb_port_is_enabled(struct tb_port *port);
 
-struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
+bool tb_pci_port_is_enabled(struct tb_port *port);
+int tb_pci_port_enable(struct tb_port *port, bool enable);
+
+int tb_dp_port_hpd_is_active(struct tb_port *port);
+int tb_dp_port_hpd_clear(struct tb_port *port);
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+			unsigned int aux_tx, unsigned int aux_rx);
+bool tb_dp_port_is_enabled(struct tb_port *port);
+int tb_dp_port_enable(struct tb_port *port, bool enable);
+
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+				 struct tb_port *dst, int dst_hopid,
+				 struct tb_port **last, const char *name);
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+			      struct tb_port *dst, int dst_hopid, int link_nr,
+			      const char *name);
 void tb_path_free(struct tb_path *path);
 int tb_path_activate(struct tb_path *path);
 void tb_path_deactivate(struct tb_path *path);
@@ -444,17 +630,16 @@
 int tb_drom_read(struct tb_switch *sw);
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
 
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_configure_link(struct tb_switch *sw);
+void tb_lc_unconfigure_link(struct tb_switch *sw);
+int tb_lc_set_sleep(struct tb_switch *sw);
 
 static inline int tb_route_length(u64 route)
 {
 	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
 }
 
-static inline bool tb_is_upstream_port(struct tb_port *port)
-{
-	return port == tb_upstream_port(port->sw);
-}
-
 /**
  * tb_downstream_route() - get route to downstream switch
  *
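Note: the HopID allocators declared above hand out per-port path config space entries; TB_PATH_MIN_HOPID encodes that HopIDs 0-7 are protocol reserved. A minimal caller sketch, assuming the allocator returns the allocated HopID on success and a negative errno on failure (the return-value semantics are an assumption):

static int example_alloc_fixed_hopid(struct tb_port *port)
{
	int hopid;

	/* Request exactly HopID 8, the first non-reserved one */
	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID,
				       TB_PATH_MIN_HOPID);
	if (hopid < 0)
		return hopid;

	/* ... program the path config entry at @hopid ... */

	tb_port_release_in_hopid(port, hopid);
	return 0;
}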
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 2487e16..4b641e4 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Thunderbolt control channel messages
  *
  * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
  * Copyright (C) 2017, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _TB_MSGS
@@ -107,10 +104,11 @@
 };
 
 enum icm_event_code {
-	ICM_EVENT_DEVICE_CONNECTED = 3,
-	ICM_EVENT_DEVICE_DISCONNECTED = 4,
-	ICM_EVENT_XDOMAIN_CONNECTED = 6,
-	ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
+	ICM_EVENT_DEVICE_CONNECTED = 0x3,
+	ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
+	ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
+	ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+	ICM_EVENT_RTD3_VETO = 0xa,
 };
 
 struct icm_pkg_header {
@@ -466,6 +464,13 @@
 	uuid_t remote_uuid;
 };
 
+/* Ice Lake messages */
+
+struct icm_icl_event_rtd3_veto {
+	struct icm_pkg_header hdr;
+	u32 veto_reason;
+};
+
 /* XDomain messages */
 
 struct tb_xdomain_header {
@@ -495,6 +500,17 @@
 	u32 type;
 };
 
+struct tb_xdp_uuid {
+	struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_uuid_response {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+	u32 src_route_hi;
+	u32 src_route_lo;
+};
+
 struct tb_xdp_properties {
 	struct tb_xdp_header hdr;
 	uuid_t src_uuid;
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 693b035..deb9d4a 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
+ * Thunderbolt driver - Port/Switch config area registers
  *
  * Every thunderbolt device consists (logically) of a switch with multiple
  * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
  * COUNTERS) which are used to configure the device.
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
  */
 
 #ifndef _TB_REGS
@@ -210,6 +211,38 @@
 
 } __packed;
 
+/* DWORD 4 */
+#define TB_PORT_NFC_CREDITS_MASK	GENMASK(19, 0)
+#define TB_PORT_MAX_CREDITS_SHIFT	20
+#define TB_PORT_MAX_CREDITS_MASK	GENMASK(26, 20)
+/* DWORD 5 */
+#define TB_PORT_LCA_SHIFT		22
+#define TB_PORT_LCA_MASK		GENMASK(28, 22)
+
+/* Display Port adapter registers */
+
+/* DWORD 0 */
+#define TB_DP_VIDEO_HOPID_SHIFT		16
+#define TB_DP_VIDEO_HOPID_MASK		GENMASK(26, 16)
+#define TB_DP_AUX_EN			BIT(30)
+#define TB_DP_VIDEO_EN			BIT(31)
+/* DWORD 1 */
+#define TB_DP_AUX_TX_HOPID_MASK		GENMASK(10, 0)
+#define TB_DP_AUX_RX_HOPID_SHIFT	11
+#define TB_DP_AUX_RX_HOPID_MASK		GENMASK(21, 11)
+/* DWORD 2 */
+#define TB_DP_HDP			BIT(6)
+/* DWORD 3 */
+#define TB_DP_HPDC			BIT(9)
+/* DWORD 4 */
+#define TB_DP_LOCAL_CAP			0x4
+/* DWORD 5 */
+#define TB_DP_REMOTE_CAP		0x5
+
+/* PCIe adapter registers */
+
+#define TB_PCI_EN			BIT(31)
+
 /* Hop register from TB_CFG_HOPS. 8 byte per entry. */
 struct tb_regs_hop {
 	/* DWORD 0 */
@@ -233,8 +266,24 @@
 	bool egress_fc:1;
 	bool ingress_shared_buffer:1;
 	bool egress_shared_buffer:1;
-	u32 unknown3:4; /* set to zero */
+	bool pending:1;
+	u32 unknown3:3; /* set to zero */
 } __packed;
 
+/* Common link controller registers */
+#define TB_LC_DESC			0x02
+#define TB_LC_DESC_NLC_MASK		GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT		8
+#define TB_LC_DESC_SIZE_MASK		GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT	16
+#define TB_LC_DESC_PORT_SIZE_MASK	GENMASK(27, 16)
+#define TB_LC_FUSE			0x03
+
+/* Link controller registers */
+#define TB_LC_SX_CTRL			0x96
+#define TB_LC_SX_CTRL_L1C		BIT(16)
+#define TB_LC_SX_CTRL_L2C		BIT(20)
+#define TB_LC_SX_CTRL_UPSTREAM		BIT(30)
+#define TB_LC_SX_CTRL_SLP		BIT(31)
 
 #endif
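Note: a worked example of the new DWORD 4 credit layout, mirroring how tb_dma_credits() and tb_dp_init_video_path() in tunnel.c below consume these masks; the helper name is illustrative only:

static u32 example_max_credits(const struct tb_port *port)
{
	u32 nfc = port->config.nfc_credits;

	/*
	 * Bits 19:0 hold the currently allocated NFC credits,
	 * bits 26:20 advertise the total buffers the port implements.
	 */
	return (nfc & TB_PORT_MAX_CREDITS_MASK) >> TB_PORT_MAX_CREDITS_SHIFT;
}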
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
new file mode 100644
index 0000000..5a99234
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tunnel.h"
+#include "tb.h"
+
+/* PCIe adapters always use HopID 8 for both directions */
+#define TB_PCI_HOPID			8
+
+#define TB_PCI_PATH_DOWN		0
+#define TB_PCI_PATH_UP			1
+
+/* DP adapters use HopID 8 for AUX and 9 for Video */
+#define TB_DP_AUX_TX_HOPID		8
+#define TB_DP_AUX_RX_HOPID		8
+#define TB_DP_VIDEO_HOPID		9
+
+#define TB_DP_VIDEO_PATH_OUT		0
+#define TB_DP_AUX_PATH_OUT		1
+#define TB_DP_AUX_PATH_IN		2
+
+#define TB_DMA_PATH_OUT			0
+#define TB_DMA_PATH_IN			1
+
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
+	do {                                                            \
+		struct tb_tunnel *__tunnel = (tunnel);                  \
+		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
+		      tb_route(__tunnel->src_port->sw),                 \
+		      __tunnel->src_port->port,                         \
+		      tb_route(__tunnel->dst_port->sw),                 \
+		      __tunnel->dst_port->port,                         \
+		      tb_tunnel_names[__tunnel->type],			\
+		      ## arg);                                          \
+	} while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+
+static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
+					 enum tb_tunnel_type type)
+{
+	struct tb_tunnel *tunnel;
+
+	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
+	if (!tunnel->paths) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&tunnel->list);
+	tunnel->tb = tb;
+	tunnel->npaths = npaths;
+	tunnel->type = type;
+
+	return tunnel;
+}
+
+static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
+{
+	int res;
+
+	res = tb_pci_port_enable(tunnel->src_port, activate);
+	if (res)
+		return res;
+
+	if (tb_port_is_pcie_up(tunnel->dst_port))
+		return tb_pci_port_enable(tunnel->dst_port, activate);
+
+	return 0;
+}
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 3;
+	path->weight = 1;
+	path->drop_packages = 0;
+	path->nfc_credits = 0;
+	path->hops[0].initial_credits = 7;
+	path->hops[1].initial_credits = 16;
+}
+
+/**
+ * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
+ * @tb: Pointer to the domain structure
+ * @down: PCIe downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the PCIe upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+
+	if (!tb_pci_port_is_enabled(down))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_pci_activate;
+	tunnel->src_port = down;
+
+	/*
+	 * Discover both paths even if they are not complete. We will
+	 * clean them up by calling tb_tunnel_deactivate() below in that
+	 * case.
+	 */
+	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
+				&tunnel->dst_port, "PCIe Up");
+	if (!path) {
+		/* Just disable the downstream port */
+		tb_pci_port_enable(down, false);
+		goto err_free;
+	}
+	tunnel->paths[TB_PCI_PATH_UP] = path;
+	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
+
+	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
+				"PCIe Down");
+	if (!path)
+		goto err_deactivate;
+	tunnel->paths[TB_PCI_PATH_DOWN] = path;
+	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
+
+	/* Validate that the tunnel is complete */
+	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
+		tb_port_warn(tunnel->dst_port,
+			     "path does not end on a PCIe adapter, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (down != tunnel->src_port) {
+		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
+		tb_tunnel_warn(tunnel,
+			       "tunnel is not fully activated, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	tb_tunnel_dbg(tunnel, "discovered\n");
+	return tunnel;
+
+err_deactivate:
+	tb_tunnel_deactivate(tunnel);
+err_free:
+	tb_tunnel_free(tunnel);
+
+	return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_pci() - allocate a pci tunnel
+ * @tb: Pointer to the domain structure
+ * @up: PCIe upstream adapter port
+ * @down: PCIe downstream adapter port
+ *
+ * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+				      struct tb_port *down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_pci_activate;
+	tunnel->src_port = down;
+	tunnel->dst_port = up;
+
+	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
+			     "PCIe Down");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_pci_init_path(path);
+	tunnel->paths[TB_PCI_PATH_DOWN] = path;
+
+	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
+			     "PCIe Up");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_pci_init_path(path);
+	tunnel->paths[TB_PCI_PATH_UP] = path;
+
+	return tunnel;
+}
+
+static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+{
+	struct tb_port *out = tunnel->dst_port;
+	struct tb_port *in = tunnel->src_port;
+	u32 in_dp_cap, out_dp_cap;
+	int ret;
+
+	/*
+	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
+	 * newer generation hardware.
+	 */
+	if (in->sw->generation < 2 || out->sw->generation < 2)
+		return 0;
+
+	/* Read both DP_LOCAL_CAP registers */
+	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
+			   in->cap_adap + TB_DP_LOCAL_CAP, 1);
+	if (ret)
+		return ret;
+
+	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
+			   out->cap_adap + TB_DP_LOCAL_CAP, 1);
+	if (ret)
+		return ret;
+
+	/* Write IN local caps to OUT remote caps */
+	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
+			    out->cap_adap + TB_DP_REMOTE_CAP, 1);
+	if (ret)
+		return ret;
+
+	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
+			     in->cap_adap + TB_DP_REMOTE_CAP, 1);
+}
+
+static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
+{
+	int ret;
+
+	if (active) {
+		struct tb_path **paths;
+		int last;
+
+		paths = tunnel->paths;
+		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
+
+		tb_dp_port_set_hops(tunnel->src_port,
+			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
+			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
+			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
+
+		tb_dp_port_set_hops(tunnel->dst_port,
+			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
+			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
+			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
+	} else {
+		tb_dp_port_hpd_clear(tunnel->src_port);
+		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
+		if (tb_port_is_dpout(tunnel->dst_port))
+			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
+	}
+
+	ret = tb_dp_port_enable(tunnel->src_port, active);
+	if (ret)
+		return ret;
+
+	if (tb_port_is_dpout(tunnel->dst_port))
+		return tb_dp_port_enable(tunnel->dst_port, active);
+
+	return 0;
+}
+
+static void tb_dp_init_aux_path(struct tb_path *path)
+{
+	int i;
+
+	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 2;
+	path->weight = 1;
+
+	for (i = 0; i < path->path_length; i++)
+		path->hops[i].initial_credits = 1;
+}
+
+static void tb_dp_init_video_path(struct tb_path *path, bool discover)
+{
+	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
+
+	path->egress_fc_enable = TB_PATH_NONE;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_NONE;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 1;
+	path->weight = 1;
+
+	if (discover) {
+		path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+	} else {
+		u32 max_credits;
+
+		max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
+			TB_PORT_MAX_CREDITS_SHIFT;
+		/* Leave some credits for AUX path */
+		path->nfc_credits = min(max_credits - 2, 12U);
+	}
+}
+
+/**
+ * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter
+ *
+ * If @in adapter is active, follows the tunnel to the DP out adapter
+ * and back. Returns the discovered tunnel or %NULL if there was no
+ * tunnel.
+ *
+ * Return: DP tunnel or %NULL if no tunnel found.
+ */
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_port *port;
+	struct tb_path *path;
+
+	if (!tb_dp_port_is_enabled(in))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->init = tb_dp_xchg_caps;
+	tunnel->activate = tb_dp_activate;
+	tunnel->src_port = in;
+
+	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
+				&tunnel->dst_port, "Video");
+	if (!path) {
+		/* Just disable the DP IN port */
+		tb_dp_port_enable(in, false);
+		goto err_free;
+	}
+	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
+	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
+
+	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+	if (!path)
+		goto err_deactivate;
+	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
+	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+
+	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
+				&port, "AUX RX");
+	if (!path)
+		goto err_deactivate;
+	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
+	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+
+	/* Validate that the tunnel is complete */
+	if (!tb_port_is_dpout(tunnel->dst_port)) {
+		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (!tb_dp_port_is_enabled(tunnel->dst_port))
+		goto err_deactivate;
+
+	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
+		goto err_deactivate;
+
+	if (port != tunnel->src_port) {
+		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	tb_tunnel_dbg(tunnel, "discovered\n");
+	return tunnel;
+
+err_deactivate:
+	tb_tunnel_deactivate(tunnel);
+err_free:
+	tb_tunnel_free(tunnel);
+
+	return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter port
+ * @out: DP out adapter port
+ *
+ * Allocates a tunnel between @in and @out that is capable of tunneling
+ * Display Port traffic.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+				     struct tb_port *out)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path **paths;
+	struct tb_path *path;
+
+	if (WARN_ON(!in->cap_adap || !out->cap_adap))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->init = tb_dp_xchg_caps;
+	tunnel->activate = tb_dp_activate;
+	tunnel->src_port = in;
+	tunnel->dst_port = out;
+
+	paths = tunnel->paths;
+
+	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
+			     1, "Video");
+	if (!path)
+		goto err_free;
+	tb_dp_init_video_path(path, false);
+	paths[TB_DP_VIDEO_PATH_OUT] = path;
+
+	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
+			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+	if (!path)
+		goto err_free;
+	tb_dp_init_aux_path(path);
+	paths[TB_DP_AUX_PATH_OUT] = path;
+
+	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
+			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+	if (!path)
+		goto err_free;
+	tb_dp_init_aux_path(path);
+	paths[TB_DP_AUX_PATH_IN] = path;
+
+	return tunnel;
+
+err_free:
+	tb_tunnel_free(tunnel);
+	return NULL;
+}
+
+static u32 tb_dma_credits(struct tb_port *nhi)
+{
+	u32 max_credits;
+
+	max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
+		TB_PORT_MAX_CREDITS_SHIFT;
+	return min(max_credits, 13U);
+}
+
+static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
+{
+	struct tb_port *nhi = tunnel->src_port;
+	u32 credits;
+
+	credits = active ? tb_dma_credits(nhi) : 0;
+	return tb_port_set_initial_credits(nhi, credits);
+}
+
+static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
+			     unsigned int efc, u32 credits)
+{
+	int i;
+
+	path->egress_fc_enable = efc;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_shared_buffer = isb;
+	path->priority = 5;
+	path->weight = 1;
+	path->clear_fc = true;
+
+	for (i = 0; i < path->path_length; i++)
+		path->hops[i].initial_credits = credits;
+}
+
+/**
+ * tb_tunnel_alloc_dma() - allocate a DMA tunnel
+ * @tb: Pointer to the domain structure
+ * @nhi: Host controller port
+ * @dst: Destination null port to which the other domain is connected
+ * @transmit_ring: NHI ring number used to send packets towards the
+ *		   other domain
+ * @transmit_path: HopID used for transmitting packets
+ * @receive_ring: NHI ring number used to receive packets from the
+ *		  other domain
+ * @receive_path: HopID used for receiving packets
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+				      struct tb_port *dst, int transmit_ring,
+				      int transmit_path, int receive_ring,
+				      int receive_path)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+	u32 credits;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_dma_activate;
+	tunnel->src_port = nhi;
+	tunnel->dst_port = dst;
+
+	credits = tb_dma_credits(nhi);
+
+	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
+			 credits);
+	tunnel->paths[TB_DMA_PATH_IN] = path;
+
+	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+	tunnel->paths[TB_DMA_PATH_OUT] = path;
+
+	return tunnel;
+}
+
+/**
+ * tb_tunnel_free() - free a tunnel
+ * @tunnel: Tunnel to be freed
+ *
+ * Frees a tunnel. The tunnel does not need to be deactivated.
+ */
+void tb_tunnel_free(struct tb_tunnel *tunnel)
+{
+	int i;
+
+	if (!tunnel)
+		return;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (tunnel->paths[i])
+			tb_path_free(tunnel->paths[i]);
+	}
+
+	kfree(tunnel->paths);
+	kfree(tunnel);
+}
+
+/**
+ * tb_tunnel_is_invalid() - check whether an activated path is still valid
+ * @tunnel: Tunnel to check
+ */
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
+{
+	int i;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		WARN_ON(!tunnel->paths[i]->activated);
+		if (tb_path_is_invalid(tunnel->paths[i]))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * tb_tunnel_restart() - activate a tunnel after a hardware reset
+ * @tunnel: Tunnel to restart
+ *
+ * Return: 0 on success and negative errno in case of failure
+ */
+int tb_tunnel_restart(struct tb_tunnel *tunnel)
+{
+	int res, i;
+
+	tb_tunnel_dbg(tunnel, "activating\n");
+
+	/*
+	 * Make sure all paths are properly disabled before enabling
+	 * them again.
+	 */
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (tunnel->paths[i]->activated) {
+			tb_path_deactivate(tunnel->paths[i]);
+			tunnel->paths[i]->activated = false;
+		}
+	}
+
+	if (tunnel->init) {
+		res = tunnel->init(tunnel);
+		if (res)
+			return res;
+	}
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		res = tb_path_activate(tunnel->paths[i]);
+		if (res)
+			goto err;
+	}
+
+	if (tunnel->activate) {
+		res = tunnel->activate(tunnel, true);
+		if (res)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	tb_tunnel_warn(tunnel, "activation failed\n");
+	tb_tunnel_deactivate(tunnel);
+	return res;
+}
+
+/**
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
+{
+	int i;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (tunnel->paths[i]->activated) {
+			tb_tunnel_WARN(tunnel,
+				       "trying to activate an already activated tunnel\n");
+			return -EINVAL;
+		}
+	}
+
+	return tb_tunnel_restart(tunnel);
+}
+
+/**
+ * tb_tunnel_deactivate() - deactivate a tunnel
+ * @tunnel: Tunnel to deactivate
+ */
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
+{
+	int i;
+
+	tb_tunnel_dbg(tunnel, "deactivating\n");
+
+	if (tunnel->activate)
+		tunnel->activate(tunnel, false);
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (tunnel->paths[i] && tunnel->paths[i]->activated)
+			tb_path_deactivate(tunnel->paths[i]);
+	}
+}
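Note: the expected lifecycle of the new tunnel API, condensed from tb_tunnel_pci() and tb_free_tunnel() in tb.c above (tcm, up and down come from the surrounding connection manager code; error handling abbreviated):

	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;
	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);	/* paths are freed with the tunnel */
		return -EIO;
	}
	list_add_tail(&tunnel->list, &tcm->tunnel_list);

	/* ... and later, on unplug or teardown ... */
	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);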
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
new file mode 100644
index 0000000..c68bbcd
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef TB_TUNNEL_H_
+#define TB_TUNNEL_H_
+
+#include "tb.h"
+
+enum tb_tunnel_type {
+	TB_TUNNEL_PCI,
+	TB_TUNNEL_DP,
+	TB_TUNNEL_DMA,
+};
+
+/**
+ * struct tb_tunnel - Tunnel between two ports
+ * @tb: Pointer to the domain
+ * @src_port: Source port of the tunnel
+ * @dst_port: Destination port of the tunnel. For discovered incomplete
+ *	      tunnels this may be %NULL or a null adapter port instead.
+ * @paths: All paths required by the tunnel
+ * @npaths: Number of paths in @paths
+ * @init: Optional tunnel specific initialization
+ * @activate: Optional tunnel specific activation/deactivation
+ * @list: Tunnels are linked using this field
+ * @type: Type of the tunnel
+ */
+struct tb_tunnel {
+	struct tb *tb;
+	struct tb_port *src_port;
+	struct tb_port *dst_port;
+	struct tb_path **paths;
+	size_t npaths;
+	int (*init)(struct tb_tunnel *tunnel);
+	int (*activate)(struct tb_tunnel *tunnel, bool activate);
+	struct list_head list;
+	enum tb_tunnel_type type;
+};
+
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+				      struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+				     struct tb_port *out);
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+				      struct tb_port *dst, int transmit_ring,
+				      int transmit_path, int receive_ring,
+				      int receive_path);
+
+void tb_tunnel_free(struct tb_tunnel *tunnel);
+int tb_tunnel_activate(struct tb_tunnel *tunnel);
+int tb_tunnel_restart(struct tb_tunnel *tunnel);
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+
+static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
+{
+	return tunnel->type == TB_TUNNEL_PCI;
+}
+
+static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
+{
+	return tunnel->type == TB_TUNNEL_DP;
+}
+
+static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
+{
+	return tunnel->type == TB_TUNNEL_DMA;
+}
+
+#endif
+
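The @init and @activate hooks are the seam where each tunnel type layers its
protocol-specific programming around the generic path activation done in
tb_tunnel_restart(). A hypothetical type would wire them up along these lines
(the tb_example_* names are illustrative, not part of the driver):

	static int tb_example_init(struct tb_tunnel *tunnel)
	{
		/* One-time setup performed before the paths are activated */
		return 0;
	}

	static int tb_example_activate(struct tb_tunnel *tunnel, bool activate)
	{
		/* Enable (true) or disable (false) the protocol adapters */
		return 0;
	}

	/* ...and during tunnel allocation: */
	tunnel->init = tb_example_init;
	tunnel->activate = tb_example_activate;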
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
deleted file mode 100644
index 0637537..0000000
--- a/drivers/thunderbolt/tunnel_pci.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Thunderbolt Cactus Ridge driver - PCIe tunnel
- *
- * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
- */
-
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tunnel_pci.h"
-#include "tb.h"
-
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
-	do {                                                            \
-		struct tb_pci_tunnel *__tunnel = (tunnel);              \
-		level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt,  \
-		      tb_route(__tunnel->down_port->sw),                \
-		      __tunnel->down_port->port,                        \
-		      tb_route(__tunnel->up_port->sw),                  \
-		      __tunnel->up_port->port,                          \
-		      ## arg);                                          \
-	} while (0)
-
-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
-	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
-#define tb_tunnel_warn(tunnel, fmt, arg...) \
-	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
-#define tb_tunnel_info(tunnel, fmt, arg...) \
-	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
-
-static void tb_pci_init_path(struct tb_path *path)
-{
-	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
-	path->egress_shared_buffer = TB_PATH_NONE;
-	path->ingress_fc_enable = TB_PATH_ALL;
-	path->ingress_shared_buffer = TB_PATH_NONE;
-	path->priority = 3;
-	path->weight = 1;
-	path->drop_packages = 0;
-	path->nfc_credits = 0;
-}
-
-/**
- * tb_pci_alloc() - allocate a pci tunnel
- *
- * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
- * TB_TYPE_PCIE_DOWN.
- *
- * Currently only paths consisting of two hops are supported (that is the
- * ports must be on "adjacent" switches).
- *
- * The paths are hard-coded to use hop 8 (the only working hop id available on
- * my thunderbolt devices). Therefore at most ONE path per device may be
- * activated.
- *
- * Return: Returns a tb_pci_tunnel on success or NULL on failure.
- */
-struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
-				   struct tb_port *down)
-{
-	struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
-	if (!tunnel)
-		goto err;
-	tunnel->tb = tb;
-	tunnel->down_port = down;
-	tunnel->up_port = up;
-	INIT_LIST_HEAD(&tunnel->list);
-	tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
-	if (!tunnel->path_to_up)
-		goto err;
-	tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
-	if (!tunnel->path_to_down)
-		goto err;
-	tb_pci_init_path(tunnel->path_to_up);
-	tb_pci_init_path(tunnel->path_to_down);
-
-	tunnel->path_to_up->hops[0].in_port = down;
-	tunnel->path_to_up->hops[0].in_hop_index = 8;
-	tunnel->path_to_up->hops[0].in_counter_index = -1;
-	tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
-	tunnel->path_to_up->hops[0].next_hop_index = 8;
-
-	tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
-	tunnel->path_to_up->hops[1].in_hop_index = 8;
-	tunnel->path_to_up->hops[1].in_counter_index = -1;
-	tunnel->path_to_up->hops[1].out_port = up;
-	tunnel->path_to_up->hops[1].next_hop_index = 8;
-
-	tunnel->path_to_down->hops[0].in_port = up;
-	tunnel->path_to_down->hops[0].in_hop_index = 8;
-	tunnel->path_to_down->hops[0].in_counter_index = -1;
-	tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
-	tunnel->path_to_down->hops[0].next_hop_index = 8;
-
-	tunnel->path_to_down->hops[1].in_port =
-		tb_upstream_port(up->sw)->remote;
-	tunnel->path_to_down->hops[1].in_hop_index = 8;
-	tunnel->path_to_down->hops[1].in_counter_index = -1;
-	tunnel->path_to_down->hops[1].out_port = down;
-	tunnel->path_to_down->hops[1].next_hop_index = 8;
-	return tunnel;
-
-err:
-	if (tunnel) {
-		if (tunnel->path_to_down)
-			tb_path_free(tunnel->path_to_down);
-		if (tunnel->path_to_up)
-			tb_path_free(tunnel->path_to_up);
-		kfree(tunnel);
-	}
-	return NULL;
-}
-
-/**
- * tb_pci_free() - free a tunnel
- *
- * The tunnel must have been deactivated.
- */
-void tb_pci_free(struct tb_pci_tunnel *tunnel)
-{
-	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
-		tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
-		return;
-	}
-	tb_path_free(tunnel->path_to_up);
-	tb_path_free(tunnel->path_to_down);
-	kfree(tunnel);
-}
-
-/**
- * tb_pci_is_invalid - check whether an activated path is still valid
- */
-bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
-{
-	WARN_ON(!tunnel->path_to_up->activated);
-	WARN_ON(!tunnel->path_to_down->activated);
-
-	return tb_path_is_invalid(tunnel->path_to_up)
-	       || tb_path_is_invalid(tunnel->path_to_down);
-}
-
-/**
- * tb_pci_port_active() - activate/deactivate PCI capability
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-static int tb_pci_port_active(struct tb_port *port, bool active)
-{
-	u32 word = active ? 0x80000000 : 0x0;
-	int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
-	if (cap < 0) {
-		tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
-		return cap;
-	}
-	return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
-}
-
-/**
- * tb_pci_restart() - activate a tunnel after a hardware reset
- */
-int tb_pci_restart(struct tb_pci_tunnel *tunnel)
-{
-	int res;
-	tunnel->path_to_up->activated = false;
-	tunnel->path_to_down->activated = false;
-
-	tb_tunnel_info(tunnel, "activating\n");
-
-	res = tb_path_activate(tunnel->path_to_up);
-	if (res)
-		goto err;
-	res = tb_path_activate(tunnel->path_to_down);
-	if (res)
-		goto err;
-
-	res = tb_pci_port_active(tunnel->down_port, true);
-	if (res)
-		goto err;
-
-	res = tb_pci_port_active(tunnel->up_port, true);
-	if (res)
-		goto err;
-	return 0;
-err:
-	tb_tunnel_warn(tunnel, "activation failed\n");
-	tb_pci_deactivate(tunnel);
-	return res;
-}
-
-/**
- * tb_pci_activate() - activate a tunnel
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_pci_activate(struct tb_pci_tunnel *tunnel)
-{
-	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
-		tb_tunnel_WARN(tunnel,
-			       "trying to activate an already activated tunnel\n");
-		return -EINVAL;
-	}
-
-	return tb_pci_restart(tunnel);
-}
-
-
-
-/**
- * tb_pci_deactivate() - deactivate a tunnel
- */
-void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
-{
-	tb_tunnel_info(tunnel, "deactivating\n");
-	/*
-	 * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
-	 * port. Seems to have no effect?
-	 */
-	tb_pci_port_active(tunnel->up_port, false);
-	tb_pci_port_active(tunnel->down_port, false);
-	if (tunnel->path_to_down->activated)
-		tb_path_deactivate(tunnel->path_to_down);
-	if (tunnel->path_to_up->activated)
-		tb_path_deactivate(tunnel->path_to_up);
-}
-
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
deleted file mode 100644
index f9b65fa..0000000
--- a/drivers/thunderbolt/tunnel_pci.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Thunderbolt Cactus Ridge driver - PCIe tunnel
- *
- * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
- */
-
-#ifndef TB_PCI_H_
-#define TB_PCI_H_
-
-#include "tb.h"
-
-struct tb_pci_tunnel {
-	struct tb *tb;
-	struct tb_port *up_port;
-	struct tb_port *down_port;
-	struct tb_path *path_to_up;
-	struct tb_path *path_to_down;
-	struct list_head list;
-};
-
-struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
-				   struct tb_port *down);
-void tb_pci_free(struct tb_pci_tunnel *tunnel);
-int tb_pci_activate(struct tb_pci_tunnel *tunnel);
-int tb_pci_restart(struct tb_pci_tunnel *tunnel);
-void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
-bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
-
-#endif
-
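For reference, the removed tb_pci_* entry points map one-to-one onto the
generic API declared in tunnel.h above:

	tb_pci_alloc()      -> tb_tunnel_alloc_pci()
	tb_pci_free()       -> tb_tunnel_free()
	tb_pci_activate()   -> tb_tunnel_activate()
	tb_pci_restart()    -> tb_tunnel_restart()
	tb_pci_deactivate() -> tb_tunnel_deactivate()
	tb_pci_is_invalid() -> tb_tunnel_is_invalid()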
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index db8bece..4e17a7c 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Thunderbolt XDomain discovery protocol support
  *
  * Copyright (C) 2017, Intel Corporation
  * Authors: Michael Jamet <michael.jamet@intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/device.h>
@@ -21,6 +18,7 @@
 #include "tb.h"
 
 #define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
+#define XDOMAIN_UUID_RETRIES			10
 #define XDOMAIN_PROPERTIES_RETRIES		60
 #define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
 
@@ -225,6 +223,50 @@
 	return 0;
 }
 
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+			       uuid_t *uuid)
+{
+	struct tb_xdp_uuid_response res;
+	struct tb_xdp_uuid req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+			   sizeof(req));
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+				   TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = tb_xdp_handle_error(&res.hdr);
+	if (ret)
+		return ret;
+
+	uuid_copy(uuid, &res.src_uuid);
+	return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+				const uuid_t *uuid)
+{
+	struct tb_xdp_uuid_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+			   sizeof(res));
+
+	uuid_copy(&res.src_uuid, uuid);
+	res.src_route_hi = upper_32_bits(route);
+	res.src_route_lo = lower_32_bits(route);
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
 static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
 				 enum tb_xdp_error error)
 {
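The UUID message structures themselves live in tb_msgs.h and are not part of
this hunk; judging from the fields used above they look roughly like this
(a sketch, not the authoritative definition):

	struct tb_xdp_uuid {
		struct tb_xdp_header hdr;
	};

	struct tb_xdp_uuid_response {
		struct tb_xdp_header hdr;
		uuid_t src_uuid;
		u32 src_route_hi;
		u32 src_route_lo;
	};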
@@ -515,7 +557,14 @@
 		break;
 	}
 
+	case UUID_REQUEST_OLD:
+	case UUID_REQUEST:
+		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+		break;
+
 	default:
+		tb_xdp_error_response(ctl, route, sequence,
+				      ERROR_NOT_SUPPORTED);
 		break;
 	}
 
@@ -527,9 +576,11 @@
 out:
 	kfree(xw->pkg);
 	kfree(xw);
+
+	tb_domain_put(tb);
 }
 
-static void
+static bool
 tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
 			size_t size)
 {
@@ -537,13 +588,18 @@
 
 	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
 	if (!xw)
-		return;
+		return false;
 
 	INIT_WORK(&xw->work, tb_xdp_handle_request);
 	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
-	xw->tb = tb;
+	if (!xw->pkg) {
+		kfree(xw);
+		return false;
+	}
+	xw->tb = tb_domain_get(tb);
 
-	queue_work(tb->wq, &xw->work);
+	schedule_work(&xw->work);
+	return true;
 }
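Note the lifetime pattern here: the domain reference taken with
tb_domain_get() when the request is queued is dropped with tb_domain_put() at
the end of tb_xdp_handle_request(), so the struct tb cannot disappear while a
request is still in flight. In isolation:

	xw->tb = tb_domain_get(tb);	/* pin before scheduling */
	schedule_work(&xw->work);
	...
	tb_domain_put(xw->tb);		/* last thing in the worker */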
 
 /**
@@ -580,7 +636,7 @@
 	 * It should be null terminated but anything else is pretty much
 	 * allowed.
 	 */
-	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
 }
 static DEVICE_ATTR_RO(key);
 
@@ -743,6 +799,7 @@
 	struct tb_service *svc;
 	struct tb_property *p;
 	struct device *dev;
+	int id;
 
 	/*
 	 * First remove all services that are not available anymore in
@@ -771,7 +828,12 @@
 			break;
 		}
 
-		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		if (id < 0) {
+			kfree(svc);
+			break;
+		}
+		svc->id = id;
 		svc->dev.bus = &tb_bus_type;
 		svc->dev.type = &tb_service_type;
 		svc->dev.parent = &xd->dev;
@@ -829,6 +891,55 @@
 	}
 }
 
+static void tb_xdomain_get_uuid(struct work_struct *work)
+{
+	struct tb_xdomain *xd = container_of(work, typeof(*xd),
+					     get_uuid_work.work);
+	struct tb *tb = xd->tb;
+	uuid_t uuid;
+	int ret;
+
+	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+	if (ret < 0) {
+		if (xd->uuid_retries-- > 0) {
+			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+					   msecs_to_jiffies(100));
+		} else {
+			dev_dbg(&xd->dev, "failed to read remote UUID\n");
+		}
+		return;
+	}
+
+	if (uuid_equal(&uuid, xd->local_uuid)) {
+		dev_dbg(&xd->dev, "intra-domain loop detected\n");
+		return;
+	}
+
+	/*
+	 * If the UUID is different, there is another domain connected
+	 * so mark this one unplugged and wait for the connection
+	 * manager to replace it.
+	 */
+	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+		xd->is_unplugged = true;
+		return;
+	}
+
+	/* First time around, fill in the missing UUID */
+	if (!xd->remote_uuid) {
+		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+		if (!xd->remote_uuid)
+			return;
+	}
+
+	/* Now we can start the normal properties exchange */
+	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+			   msecs_to_jiffies(100));
+	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+			   msecs_to_jiffies(1000));
+}
+
 static void tb_xdomain_get_properties(struct work_struct *work)
 {
 	struct tb_xdomain *xd = container_of(work, typeof(*xd),
@@ -1035,21 +1146,29 @@
 
 static void start_handshake(struct tb_xdomain *xd)
 {
+	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
 	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
 	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
 
-	/* Start exchanging properties with the other host */
-	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-			   msecs_to_jiffies(100));
-	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-			   msecs_to_jiffies(1000));
+	if (xd->needs_uuid) {
+		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+				   msecs_to_jiffies(100));
+	} else {
+		/* Start exchanging properties with the other host */
+		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+				   msecs_to_jiffies(100));
+		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+				   msecs_to_jiffies(1000));
+	}
 }
 
 static void stop_handshake(struct tb_xdomain *xd)
 {
+	xd->uuid_retries = 0;
 	xd->properties_retries = 0;
 	xd->properties_changed_retries = 0;
 
+	cancel_delayed_work_sync(&xd->get_uuid_work);
 	cancel_delayed_work_sync(&xd->get_properties_work);
 	cancel_delayed_work_sync(&xd->properties_changed_work);
 }
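Taken together, this puts an optional discovery phase in front of the
handshake: when the XDomain was created without a remote UUID, the worker
chain is roughly

	get_uuid_work (up to XDOMAIN_UUID_RETRIES attempts, 100 ms apart)
		-> properties_changed_work (after 100 ms)
		-> get_properties_work (after 1000 ms)

whereas with a known remote UUID the property exchange starts immediately, as
before.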
@@ -1092,7 +1211,7 @@
  *	    other domain is reached).
  * @route: Route string used to reach the other domain
  * @local_uuid: Our local domain UUID
- * @remote_uuid: UUID of the other domain
+ * @remote_uuid: UUID of the other domain (optional)
  *
  * Allocates new XDomain structure and returns pointer to that. The
  * object must be released by calling tb_xdomain_put().
@@ -1111,6 +1230,7 @@
 	xd->route = route;
 	ida_init(&xd->service_ids);
 	mutex_init(&xd->lock);
+	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
 	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
 	INIT_DELAYED_WORK(&xd->properties_changed_work,
 			  tb_xdomain_properties_changed);
@@ -1119,9 +1239,14 @@
 	if (!xd->local_uuid)
 		goto err_free;
 
-	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
-	if (!xd->remote_uuid)
-		goto err_free_local_uuid;
+	if (remote_uuid) {
+		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+					  GFP_KERNEL);
+		if (!xd->remote_uuid)
+			goto err_free_local_uuid;
+	} else {
+		xd->needs_uuid = true;
+	}
 
 	device_initialize(&xd->dev);
 	xd->dev.parent = get_device(parent);
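With @remote_uuid now optional, a connection manager can create the XDomain
before the remote end has identified itself; passing NULL arms the UUID
discovery in start_handshake(). A hedged call sketch (parameters as in the
kernel-doc above):

	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, NULL);
	if (xd)
		tb_xdomain_add(xd);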
@@ -1285,14 +1410,12 @@
 		struct tb_port *port = &sw->ports[i];
 		struct tb_xdomain *xd;
 
-		if (tb_is_upstream_port(port))
-			continue;
-
 		if (port->xdomain) {
 			xd = port->xdomain;
 
 			if (lookup->uuid) {
-				if (uuid_equal(xd->remote_uuid, lookup->uuid))
+				if (xd->remote_uuid &&
+				    uuid_equal(xd->remote_uuid, lookup->uuid))
 					return xd;
 			} else if (lookup->link &&
 				   lookup->link == xd->link &&
@@ -1302,7 +1425,7 @@
 				   lookup->route == xd->route) {
 				return xd;
 			}
-		} else if (port->remote) {
+		} else if (tb_port_has_remote(port)) {
 			xd = switch_find_xdomain(port->remote->sw, lookup);
 			if (xd)
 				return xd;
@@ -1419,10 +1542,8 @@
 	 * handlers in turn.
 	 */
 	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
-		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
-			tb_xdp_schedule_request(tb, hdr, size);
-			return true;
-		}
+		if (type == TB_CFG_PKG_XDOMAIN_REQ)
+			return tb_xdp_schedule_request(tb, hdr, size);
 		return false;
 	}
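The return value now propagates scheduling success: the control-channel event
callback learns whether the XDP request was actually copied and queued. A
hedged sketch of that contract is simply

	if (!tb_xdomain_handle_request(tb, type, buf, size))
		; /* not consumed (e.g. allocation failed): packet is dropped */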