drivers/thunderbolt snapshot from Linux v4.19.13.
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 0000000..f4869c3
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,16 @@
+menuconfig THUNDERBOLT
+	tristate "Thunderbolt support"
+	depends on PCI
+	depends on X86 || COMPILE_TEST
+	select APPLE_PROPERTIES if EFI_STUB && X86
+	select CRC32
+	select CRYPTO
+	select CRYPTO_HASH
+	select NVMEM
+	help
+	  Thunderbolt Controller driver. This driver is required if you
+	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
+	  with Intel Falcon Ridge or newer.
+
+	  To compile this driver as a module, choose M here. The module will be
+	  called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 0000000..f2f0de2
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,3 @@
+obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
new file mode 100644
index 0000000..c2277b8
--- /dev/null
+++ b/drivers/thunderbolt/cap.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - capabilities lookup
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include "tb.h"
+
+#define CAP_OFFSET_MAX		0xff
+#define VSE_CAP_OFFSET_MAX	0xffff
+
+struct tb_cap_any {
+	union {
+		struct tb_cap_basic basic;
+		struct tb_cap_extended_short extended_short;
+		struct tb_cap_extended_long extended_long;
+	};
+} __packed;
+
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+	u32 offset;
+
+	/*
+	 * DP out adapters claim to implement TMU capability but in
+	 * reality they do not so we hard code the adapter specific
+	 * capability offset here.
+	 */
+	if (port->config.type == TB_TYPE_DP_HDMI_OUT)
+		offset = 0x39;
+	else
+		offset = 0x1;
+
+	do {
+		struct tb_cap_any header;
+		int ret;
+
+		ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+		if (ret)
+			return ret;
+
+		if (header.basic.cap == cap)
+			return offset;
+
+		offset = header.basic.next;
+	} while (offset);
+
+	return -ENOENT;
+}
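+
+/*
+ * Illustrative sketch, not part of the driver: a caller typically pairs
+ * tb_port_find_cap() with tb_port_read() to access the registers of a
+ * capability. TB_PORT_CAP_PHY is assumed to come from tb_regs.h; the
+ * helper name below is made up.
+ *
+ *	static int read_first_phy_dword(struct tb_port *port, u32 *val)
+ *	{
+ *		int cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
+ *
+ *		if (cap < 0)
+ *			return cap;
+ *		return tb_port_read(port, val, TB_CFG_PORT, cap, 1);
+ *	}
+ */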
+
+static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+{
+	int offset = sw->config.first_cap_offset;
+
+	while (offset > 0 && offset < CAP_OFFSET_MAX) {
+		struct tb_cap_any header;
+		int ret;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			return ret;
+
+		if (header.basic.cap == cap)
+			return offset;
+
+		offset = header.basic.next;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * tb_switch_find_vse_cap() - Find switch vendor specific capability
+ * @sw: Switch to find the capability for
+ * @vsec: Vendor specific capability to look for
+ *
+ * Function enumerates vendor specific capabilities (VSEC) of a switch
+ * and returns the offset when a capability matching @vsec is found. If no
+ * such capability is found returns %-ENOENT. In case of error returns
+ * negative errno.
+ */
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
+{
+	struct tb_cap_any header;
+	int offset;
+
+	offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
+	if (offset < 0)
+		return offset;
+
+	while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+		int ret;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+		if (ret)
+			return ret;
+
+		/*
+		 * Extended vendor specific capabilities come in two
+		 * flavors: short and long. The latter is used when
+		 * offset is over 0xff.
+		 */
+		if (offset >= CAP_OFFSET_MAX) {
+			if (header.extended_long.vsec_id == vsec)
+				return offset;
+			offset = header.extended_long.next;
+		} else {
+			if (header.extended_short.vsec_id == vsec)
+				return offset;
+			if (!header.extended_short.length)
+				return -ENOENT;
+			offset = header.extended_short.next;
+		}
+	}
+
+	return -ENOENT;
+}
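+
+/*
+ * Illustrative sketch, not part of the driver: the VSE lookup is used
+ * the same way, locating a vendor specific capability before reading it
+ * with tb_sw_read(). TB_VSE_CAP_PLUG_EVENTS is assumed to come from
+ * tb_regs.h; the helper name below is made up.
+ *
+ *	static int read_plug_events_dword(struct tb_switch *sw, u32 *val)
+ *	{
+ *		int cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+ *
+ *		if (cap < 0)
+ *			return cap;
+ *		return tb_sw_read(sw, val, TB_CFG_SWITCH, cap, 1);
+ *	}
+ */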
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
new file mode 100644
index 0000000..37a7f4c
--- /dev/null
+++ b/drivers/thunderbolt/ctl.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+
+
+#define TB_CTL_RX_PKG_COUNT	10
+#define TB_CTL_RETRIES		4
+
+/**
+ * struct tb_ctl - thunderbolt control channel
+ */
+struct tb_ctl {
+	struct tb_nhi *nhi;
+	struct tb_ring *tx;
+	struct tb_ring *rx;
+
+	struct dma_pool *frame_pool;
+	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
+	struct mutex request_queue_lock;
+	struct list_head request_queue;
+	bool running;
+
+	event_cb callback;
+	void *callback_data;
+};
+
+
+#define tb_ctl_WARN(ctl, format, arg...) \
+	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_err(ctl, format, arg...) \
+	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_warn(ctl, format, arg...) \
+	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_info(ctl, format, arg...) \
+	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_dbg(ctl, format, arg...) \
+	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
+/* Serializes access to request kref_get/put */
+static DEFINE_MUTEX(tb_cfg_request_lock);
+
+/**
+ * tb_cfg_request_alloc() - Allocates a new config request
+ *
+ * This is a refcounted object so when you are done with it, call
+ * tb_cfg_request_put() to release it.
+ */
+struct tb_cfg_request *tb_cfg_request_alloc(void)
+{
+	struct tb_cfg_request *req;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	kref_init(&req->kref);
+
+	return req;
+}
+
+/**
+ * tb_cfg_request_get() - Increase refcount of a request
+ * @req: Request whose refcount is increased
+ */
+void tb_cfg_request_get(struct tb_cfg_request *req)
+{
+	mutex_lock(&tb_cfg_request_lock);
+	kref_get(&req->kref);
+	mutex_unlock(&tb_cfg_request_lock);
+}
+
+static void tb_cfg_request_destroy(struct kref *kref)
+{
+	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
+
+	kfree(req);
+}
+
+/**
+ * tb_cfg_request_put() - Decrease refcount and possibly release the request
+ * @req: Request whose refcount is decreased
+ *
+ * Call this function when you are done with the request. When refcount
+ * goes to %0 the object is released.
+ */
+void tb_cfg_request_put(struct tb_cfg_request *req)
+{
+	mutex_lock(&tb_cfg_request_lock);
+	kref_put(&req->kref, tb_cfg_request_destroy);
+	mutex_unlock(&tb_cfg_request_lock);
+}
+
+static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
+				  struct tb_cfg_request *req)
+{
+	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
+	WARN_ON(req->ctl);
+
+	mutex_lock(&ctl->request_queue_lock);
+	if (!ctl->running) {
+		mutex_unlock(&ctl->request_queue_lock);
+		return -ENOTCONN;
+	}
+	req->ctl = ctl;
+	list_add_tail(&req->list, &ctl->request_queue);
+	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+	mutex_unlock(&ctl->request_queue_lock);
+	return 0;
+}
+
+static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
+{
+	struct tb_ctl *ctl = req->ctl;
+
+	mutex_lock(&ctl->request_queue_lock);
+	list_del(&req->list);
+	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+		wake_up(&tb_cfg_request_cancel_queue);
+	mutex_unlock(&ctl->request_queue_lock);
+}
+
+static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
+{
+	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+}
+
+static struct tb_cfg_request *
+tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
+{
+	struct tb_cfg_request *req;
+	bool found = false;
+
+	mutex_lock(&pkg->ctl->request_queue_lock);
+	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
+		tb_cfg_request_get(req);
+		if (req->match(req, pkg)) {
+			found = true;
+			break;
+		}
+		tb_cfg_request_put(req);
+	}
+	mutex_unlock(&pkg->ctl->request_queue_lock);
+
+	return found ? req : NULL;
+}
+
+/* utility functions */
+
+
+static int check_header(const struct ctl_pkg *pkg, u32 len,
+			enum tb_cfg_pkg_type type, u64 route)
+{
+	struct tb_cfg_header *header = pkg->buffer;
+
+	/* check frame, TODO: frame flags */
+	if (WARN(len != pkg->frame.size,
+			"wrong framesize (expected %#x, got %#x)\n",
+			len, pkg->frame.size))
+		return -EIO;
+	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
+			type, pkg->frame.eof))
+		return -EIO;
+	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
+			pkg->frame.sof))
+		return -EIO;
+
+	/* check header */
+	if (WARN(header->unknown != 1 << 9,
+			"header->unknown is %#x\n", header->unknown))
+		return -EIO;
+	if (WARN(route != tb_cfg_get_route(header),
+			"wrong route (expected %llx, got %llx)",
+			route, tb_cfg_get_route(header)))
+		return -EIO;
+	return 0;
+}
+
+static int check_config_address(struct tb_cfg_address addr,
+				enum tb_cfg_space space, u32 offset,
+				u32 length)
+{
+	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
+		return -EIO;
+	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
+			space, addr.space))
+		return -EIO;
+	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
+			offset, addr.offset))
+		return -EIO;
+	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
+			length, addr.length))
+		return -EIO;
+	/*
+	 * We cannot check addr->port as it is set to the upstream port of the
+	 * sender.
+	 */
+	return 0;
+}
+
+static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
+{
+	struct cfg_error_pkg *pkg = response->buffer;
+	struct tb_cfg_result res = { 0 };
+	res.response_route = tb_cfg_get_route(&pkg->header);
+	res.response_port = 0;
+	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
+			       tb_cfg_get_route(&pkg->header));
+	if (res.err)
+		return res;
+
+	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
+	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
+	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
+	res.err = 1;
+	res.tb_error = pkg->error;
+	res.response_port = pkg->port;
+	return res;
+}
+
+static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
+					 enum tb_cfg_pkg_type type, u64 route)
+{
+	struct tb_cfg_header *header = pkg->buffer;
+	struct tb_cfg_result res = { 0 };
+
+	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+		return decode_error(pkg);
+
+	res.response_port = 0; /* will be updated later for cfg_read/write */
+	res.response_route = tb_cfg_get_route(header);
+	res.err = check_header(pkg, len, type, route);
+	return res;
+}
+
+static void tb_cfg_print_error(struct tb_ctl *ctl,
+			       const struct tb_cfg_result *res)
+{
+	WARN_ON(res->err != 1);
+	switch (res->tb_error) {
+	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
+		/* Port is not connected. This can happen during surprise
+		 * removal. Do not warn. */
+		return;
+	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
+		/*
+		 * Invalid cfg_space/offset/length combination in
+		 * cfg_read/cfg_write.
+		 */
+		tb_ctl_WARN(ctl,
+			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
+			res->response_route, res->response_port);
+		return;
+	case TB_CFG_ERROR_NO_SUCH_PORT:
+		/*
+		 * - The route contains a non-existent port.
+		 * - The route contains a non-PHY port (e.g. PCIe).
+		 * - The port in cfg_read/cfg_write does not exist.
+		 */
+		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
+			res->response_route, res->response_port);
+		return;
+	case TB_CFG_ERROR_LOOP:
+		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
+			res->response_route, res->response_port);
+		return;
+	default:
+		/* 5,6,7,9 and 11 are also valid error codes */
+		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
+			res->response_route, res->response_port);
+		return;
+	}
+}
+
+static __be32 tb_crc(const void *data, size_t len)
+{
+	return cpu_to_be32(~__crc32c_le(~0, data, len));
+}
+
+static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
+{
+	if (pkg) {
+		dma_pool_free(pkg->ctl->frame_pool,
+			      pkg->buffer, pkg->frame.buffer_phy);
+		kfree(pkg);
+	}
+}
+
+static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
+{
+	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
+	if (!pkg)
+		return NULL;
+	pkg->ctl = ctl;
+	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
+				     &pkg->frame.buffer_phy);
+	if (!pkg->buffer) {
+		kfree(pkg);
+		return NULL;
+	}
+	return pkg;
+}
+
+
+/* RX/TX handling */
+
+static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
+			       bool canceled)
+{
+	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+	tb_ctl_pkg_free(pkg);
+}
+
+/**
+ * tb_ctl_tx() - transmit a packet on the control channel
+ *
+ * len must be a multiple of four.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
+		     enum tb_cfg_pkg_type type)
+{
+	int res;
+	struct ctl_pkg *pkg;
+	if (len % 4 != 0) { /* required for le->be conversion */
+		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
+		return -EINVAL;
+	}
+	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
+		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
+			    len, TB_FRAME_SIZE - 4);
+		return -EINVAL;
+	}
+	pkg = tb_ctl_pkg_alloc(ctl);
+	if (!pkg)
+		return -ENOMEM;
+	pkg->frame.callback = tb_ctl_tx_callback;
+	pkg->frame.size = len + 4;
+	pkg->frame.sof = type;
+	pkg->frame.eof = type;
+	cpu_to_be32_array(pkg->buffer, data, len / 4);
+	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
+
+	res = tb_ring_tx(ctl->tx, &pkg->frame);
+	if (res) /* ring is stopped */
+		tb_ctl_pkg_free(pkg);
+	return res;
+}
+
+/**
+ * tb_ctl_handle_event() - pass an event packet to ctl->callback
+ */
+static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+				struct ctl_pkg *pkg, size_t size)
+{
+	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
+}
+
+static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
+{
+	/*
+	 * We ignore failures during stop. All rx packets are referenced
+	 * from ctl->rx_packets, so we do not lose them.
+	 */
+	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
+}
+
+static int tb_async_error(const struct ctl_pkg *pkg)
+{
+	const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+
+	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
+		return false;
+
+	switch (error->error) {
+	case TB_CFG_ERROR_LINK_ERROR:
+	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+		return true;
+
+	default:
+		return false;
+	}
+}
+
+static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
+			       bool canceled)
+{
+	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+	struct tb_cfg_request *req;
+	__be32 crc32;
+
+	if (canceled)
+		return; /*
+			 * ring is stopped, packet is referenced from
+			 * ctl->rx_packets.
+			 */
+
+	if (frame->size < 4 || frame->size % 4 != 0) {
+		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
+			   frame->size);
+		goto rx;
+	}
+
+	frame->size -= 4; /* remove checksum */
+	crc32 = tb_crc(pkg->buffer, frame->size);
+	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
+
+	switch (frame->eof) {
+	case TB_CFG_PKG_READ:
+	case TB_CFG_PKG_WRITE:
+	case TB_CFG_PKG_ERROR:
+	case TB_CFG_PKG_OVERRIDE:
+	case TB_CFG_PKG_RESET:
+		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+			tb_ctl_err(pkg->ctl,
+				   "RX: checksum mismatch, dropping packet\n");
+			goto rx;
+		}
+		if (tb_async_error(pkg)) {
+			tb_ctl_handle_event(pkg->ctl, frame->eof,
+					    pkg, frame->size);
+			goto rx;
+		}
+		break;
+
+	case TB_CFG_PKG_EVENT:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+	case TB_CFG_PKG_XDOMAIN_REQ:
+		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+			tb_ctl_err(pkg->ctl,
+				   "RX: checksum mismatch, dropping packet\n");
+			goto rx;
+		}
+		/* Fall through */
+	case TB_CFG_PKG_ICM_EVENT:
+		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
+			goto rx;
+		break;
+
+	default:
+		break;
+	}
+
+	/*
+	 * The received packet will be processed only if there is an
+	 * active request and the packet is what is expected. This
+	 * prevents packets such as replies coming after timeout has
+	 * triggered from messing with the active requests.
+	 */
+	req = tb_cfg_request_find(pkg->ctl, pkg);
+	if (req) {
+		if (req->copy(req, pkg))
+			schedule_work(&req->work);
+		tb_cfg_request_put(req);
+	}
+
+rx:
+	tb_ctl_rx_submit(pkg);
+}
+
+static void tb_cfg_request_work(struct work_struct *work)
+{
+	struct tb_cfg_request *req = container_of(work, typeof(*req), work);
+
+	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+		req->callback(req->callback_data);
+
+	tb_cfg_request_dequeue(req);
+	tb_cfg_request_put(req);
+}
+
+/**
+ * tb_cfg_request() - Start a control request without waiting for it to complete
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @callback: Callback called when the request is completed
+ * @callback_data: Data to be passed to @callback
+ *
+ * This queues @req on the given control channel without waiting for it
+ * to complete. When the request completes @callback is called.
+ */
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+		   void (*callback)(void *), void *callback_data)
+{
+	int ret;
+
+	req->flags = 0;
+	req->callback = callback;
+	req->callback_data = callback_data;
+	INIT_WORK(&req->work, tb_cfg_request_work);
+	INIT_LIST_HEAD(&req->list);
+
+	tb_cfg_request_get(req);
+	ret = tb_cfg_request_enqueue(ctl, req);
+	if (ret)
+		goto err_put;
+
+	ret = tb_ctl_tx(ctl, req->request, req->request_size,
+			req->request_type);
+	if (ret)
+		goto err_dequeue;
+
+	if (!req->response)
+		schedule_work(&req->work);
+
+	return 0;
+
+err_dequeue:
+	tb_cfg_request_dequeue(req);
+err_put:
+	tb_cfg_request_put(req);
+
+	return ret;
+}
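+
+/*
+ * Illustrative sketch, not part of the driver: an asynchronous caller
+ * fills in @req the same way the synchronous helpers below do (match,
+ * copy, request and response fields) and passes a callback that runs
+ * once the matching response arrives or the request is canceled:
+ *
+ *	static void my_request_done(void *data)
+ *	{
+ *		complete(data);
+ *	}
+ *
+ *	ret = tb_cfg_request(ctl, req, my_request_done, &done);
+ *	if (!ret)
+ *		wait_for_completion(&done);
+ *	tb_cfg_request_put(req);
+ *
+ * tb_cfg_request_sync() below wraps exactly this pattern with a timeout.
+ */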
+
+/**
+ * tb_cfg_request_cancel() - Cancel a control request
+ * @req: Request to cancel
+ * @err: Error to assign to the request
+ *
+ * This function can be used to cancel an ongoing request. It will wait
+ * until the request is not active anymore.
+ */
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
+{
+	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
+	schedule_work(&req->work);
+	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
+	req->result.err = err;
+}
+
+static void tb_cfg_request_complete(void *data)
+{
+	complete(data);
+}
+
+/**
+ * tb_cfg_request_sync() - Start control request and wait until it completes
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @timeout_msec: Timeout in milliseconds to wait for @req to complete
+ *
+ * Starts a control request and waits until it completes. If the timeout
+ * triggers, the request is canceled before the function returns. Note that
+ * the caller needs to make sure only one message for a given switch is
+ * active at a time.
+ */
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+					 struct tb_cfg_request *req,
+					 int timeout_msec)
+{
+	unsigned long timeout = msecs_to_jiffies(timeout_msec);
+	struct tb_cfg_result res = { 0 };
+	DECLARE_COMPLETION_ONSTACK(done);
+	int ret;
+
+	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
+	if (ret) {
+		res.err = ret;
+		return res;
+	}
+
+	if (!wait_for_completion_timeout(&done, timeout))
+		tb_cfg_request_cancel(req, -ETIMEDOUT);
+
+	flush_work(&req->work);
+
+	return req->result;
+}
+
+/* public interface, alloc/start/stop/free */
+
+/**
+ * tb_ctl_alloc() - allocate a control channel
+ *
+ * cb will be invoked once for every hot plug event.
+ *
+ * Return: Returns a pointer on success or NULL on failure.
+ */
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
+{
+	int i;
+	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	if (!ctl)
+		return NULL;
+	ctl->nhi = nhi;
+	ctl->callback = cb;
+	ctl->callback_data = cb_data;
+
+	mutex_init(&ctl->request_queue_lock);
+	INIT_LIST_HEAD(&ctl->request_queue);
+	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
+					 TB_FRAME_SIZE, 4, 0);
+	if (!ctl->frame_pool)
+		goto err;
+
+	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+	if (!ctl->tx)
+		goto err;
+
+	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
+				0xffff, NULL, NULL);
+	if (!ctl->rx)
+		goto err;
+
+	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
+		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
+		if (!ctl->rx_packets[i])
+			goto err;
+		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
+	}
+
+	tb_ctl_info(ctl, "control channel created\n");
+	return ctl;
+err:
+	tb_ctl_free(ctl);
+	return NULL;
+}
+
+/**
+ * tb_ctl_free() - free a control channel
+ *
+ * Must be called after tb_ctl_stop.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_free(struct tb_ctl *ctl)
+{
+	int i;
+
+	if (!ctl)
+		return;
+
+	if (ctl->rx)
+		tb_ring_free(ctl->rx);
+	if (ctl->tx)
+		tb_ring_free(ctl->tx);
+
+	/* free RX packets */
+	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+		tb_ctl_pkg_free(ctl->rx_packets[i]);
+
+
+	if (ctl->frame_pool)
+		dma_pool_destroy(ctl->frame_pool);
+	kfree(ctl);
+}
+
+/**
+ * tb_ctl_start() - start/resume the control channel
+ */
+void tb_ctl_start(struct tb_ctl *ctl)
+{
+	int i;
+	tb_ctl_info(ctl, "control channel starting...\n");
+	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
+	tb_ring_start(ctl->rx);
+	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+		tb_ctl_rx_submit(ctl->rx_packets[i]);
+
+	ctl->running = true;
+}
+
+/**
+ * tb_ctl_stop() - pause the control channel
+ *
+ * All invocations of ctl->callback will have finished after this method
+ * returns.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_stop(struct tb_ctl *ctl)
+{
+	mutex_lock(&ctl->request_queue_lock);
+	ctl->running = false;
+	mutex_unlock(&ctl->request_queue_lock);
+
+	tb_ring_stop(ctl->rx);
+	tb_ring_stop(ctl->tx);
+
+	if (!list_empty(&ctl->request_queue))
+		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
+	INIT_LIST_HEAD(&ctl->request_queue);
+	tb_ctl_info(ctl, "control channel stopped\n");
+}
+
+/* public interface, commands */
+
+/**
+ * tb_cfg_error() - send error packet
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
+		 enum tb_cfg_error error)
+{
+	struct cfg_error_pkg pkg = {
+		.header = tb_cfg_make_header(route),
+		.port = port,
+		.error = error,
+	};
+	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
+	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
+}
+
+static bool tb_cfg_match(const struct tb_cfg_request *req,
+			 const struct ctl_pkg *pkg)
+{
+	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+		return true;
+
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (route != tb_cfg_get_route(req->request))
+		return false;
+	if (pkg->frame.size != req->response_size)
+		return false;
+
+	if (pkg->frame.eof == TB_CFG_PKG_READ ||
+	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
+		const struct cfg_read_pkg *req_hdr = req->request;
+		const struct cfg_read_pkg *res_hdr = pkg->buffer;
+
+		if (req_hdr->addr.seq != res_hdr->addr.seq)
+			return false;
+	}
+
+	return true;
+}
+
+static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	struct tb_cfg_result res;
+
+	/* Now make sure it is in expected format */
+	res = parse_header(pkg, req->response_size, req->response_type,
+			   tb_cfg_get_route(req->request));
+	if (!res.err)
+		memcpy(req->response, pkg->buffer, req->response_size);
+
+	req->result = res;
+
+	/* Always complete when first response is received */
+	return true;
+}
+
+/**
+ * tb_cfg_reset() - send a reset packet and wait for a response
+ *
+ * If the switch at route is incorrectly configured then we will not receive a
+ * reply (even though the switch will reset). The caller should check for
+ * -ETIMEDOUT and attempt to reconfigure the switch.
+ */
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+				  int timeout_msec)
+{
+	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
+	struct tb_cfg_result res = { 0 };
+	struct tb_cfg_header reply;
+	struct tb_cfg_request *req;
+
+	req = tb_cfg_request_alloc();
+	if (!req) {
+		res.err = -ENOMEM;
+		return res;
+	}
+
+	req->match = tb_cfg_match;
+	req->copy = tb_cfg_copy;
+	req->request = &request;
+	req->request_size = sizeof(request);
+	req->request_type = TB_CFG_PKG_RESET;
+	req->response = &reply;
+	req->response_size = sizeof(reply);
+	req->response_type = TB_CFG_PKG_RESET;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res;
+}
+
+/**
+ * tb_cfg_read_raw() - read from config space into buffer
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+		u64 route, u32 port, enum tb_cfg_space space,
+		u32 offset, u32 length, int timeout_msec)
+{
+	struct tb_cfg_result res = { 0 };
+	struct cfg_read_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.port = port,
+			.space = space,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct cfg_write_pkg reply;
+	int retries = 0;
+
+	while (retries < TB_CTL_RETRIES) {
+		struct tb_cfg_request *req;
+
+		req = tb_cfg_request_alloc();
+		if (!req) {
+			res.err = -ENOMEM;
+			return res;
+		}
+
+		request.addr.seq = retries++;
+
+		req->match = tb_cfg_match;
+		req->copy = tb_cfg_copy;
+		req->request = &request;
+		req->request_size = sizeof(request);
+		req->request_type = TB_CFG_PKG_READ;
+		req->response = &reply;
+		req->response_size = 12 + 4 * length;
+		req->response_type = TB_CFG_PKG_READ;
+
+		res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			break;
+
+		/* Wait a bit (arbitrary time) until we send a retry */
+		usleep_range(10, 100);
+	}
+
+	if (res.err)
+		return res;
+
+	res.response_port = reply.addr.port;
+	res.err = check_config_address(reply.addr, space, offset, length);
+	if (!res.err)
+		memcpy(buffer, &reply.data, 4 * length);
+	return res;
+}
+
+/**
+ * tb_cfg_write_raw() - write from buffer into config space
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
+		u64 route, u32 port, enum tb_cfg_space space,
+		u32 offset, u32 length, int timeout_msec)
+{
+	struct tb_cfg_result res = { 0 };
+	struct cfg_write_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.port = port,
+			.space = space,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct cfg_read_pkg reply;
+	int retries = 0;
+
+	memcpy(&request.data, buffer, length * 4);
+
+	while (retries < TB_CTL_RETRIES) {
+		struct tb_cfg_request *req;
+
+		req = tb_cfg_request_alloc();
+		if (!req) {
+			res.err = -ENOMEM;
+			return res;
+		}
+
+		request.addr.seq = retries++;
+
+		req->match = tb_cfg_match;
+		req->copy = tb_cfg_copy;
+		req->request = &request;
+		req->request_size = 12 + 4 * length;
+		req->request_type = TB_CFG_PKG_WRITE;
+		req->response = &reply;
+		req->response_size = sizeof(reply);
+		req->response_type = TB_CFG_PKG_WRITE;
+
+		res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			break;
+
+		/* Wait a bit (arbitrary time) until we send a retry */
+		usleep_range(10, 100);
+	}
+
+	if (res.err)
+		return res;
+
+	res.response_port = reply.addr.port;
+	res.err = check_config_address(reply.addr, space, offset, length);
+	return res;
+}
+
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+		enum tb_cfg_space space, u32 offset, u32 length)
+{
+	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
+			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+	switch (res.err) {
+	case 0:
+		/* Success */
+		break;
+
+	case 1:
+		/* Thunderbolt error, tb_error holds the actual number */
+		tb_cfg_print_error(ctl, &res);
+		return -EIO;
+
+	case -ETIMEDOUT:
+		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
+			    space, offset);
+		break;
+
+	default:
+		WARN(1, "tb_cfg_read: %d\n", res.err);
+		break;
+	}
+	return res.err;
+}
+
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
+		 enum tb_cfg_space space, u32 offset, u32 length)
+{
+	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
+			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+	switch (res.err) {
+	case 0:
+		/* Success */
+		break;
+
+	case 1:
+		/* Thunderbolt error, tb_error holds the actual number */
+		tb_cfg_print_error(ctl, &res);
+		return -EIO;
+
+	case -ETIMEDOUT:
+		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
+			    space, offset);
+		break;
+
+	default:
+		WARN(1, "tb_cfg_write: %d\n", res.err);
+		break;
+	}
+	return res.err;
+}
+
+/**
+ * tb_cfg_get_upstream_port() - get upstream port number of switch at route
+ *
+ * Reads the first dword from the switch's TB_CFG_SWITCH config area and
+ * returns the port number from which the reply originated.
+ *
+ * Return: Returns the upstream port number on success or an error code on
+ * failure.
+ */
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
+{
+	u32 dummy;
+	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
+						   TB_CFG_SWITCH, 0, 1,
+						   TB_CFG_DEFAULT_TIMEOUT);
+	if (res.err == 1)
+		return -EIO;
+	if (res.err)
+		return res.err;
+	return res.response_port;
+}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
new file mode 100644
index 0000000..3062e0b
--- /dev/null
+++ b/drivers/thunderbolt/ctl.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_CFG
+#define _TB_CFG
+
+#include <linux/kref.h>
+#include <linux/thunderbolt.h>
+
+#include "nhi.h"
+#include "tb_msgs.h"
+
+/* control channel */
+struct tb_ctl;
+
+typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+			 const void *buf, size_t size);
+
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
+void tb_ctl_start(struct tb_ctl *ctl);
+void tb_ctl_stop(struct tb_ctl *ctl);
+void tb_ctl_free(struct tb_ctl *ctl);
+
+/* configuration commands */
+
+#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
+
+struct tb_cfg_result {
+	u64 response_route;
+	u32 response_port; /*
+			    * If err = 1 then this is the port that sent the
+			    * error.
+			    * If err = 0 and if this was a cfg_read/write then
+			    * this is the upstream port of the responding
+			    * switch.
+			    * Otherwise the field is set to zero.
+			    */
+	int err; /* negative errors, 0 for success, 1 for tb errors */
+	enum tb_cfg_error tb_error; /* valid if err == 1 */
+};
+
+struct ctl_pkg {
+	struct tb_ctl *ctl;
+	void *buffer;
+	struct ring_frame frame;
+};
+
+/**
+ * struct tb_cfg_request - Control channel request
+ * @kref: Reference count
+ * @ctl: Pointer to the control channel structure. Only set when the
+ *	 request is queued.
+ * @request_size: Size of the request packet (in bytes)
+ * @request_type: Type of the request packet
+ * @response: Response is stored here
+ * @response_size: Maximum size of one response packet
+ * @response_type: Expected type of the response packet
+ * @npackets: Number of packets expected to be returned with this request
+ * @match: Function used to match the incoming packet
+ * @copy: Function used to copy the incoming packet to @response
+ * @callback: Callback called when the request is finished successfully
+ * @callback_data: Data to be passed to @callback
+ * @flags: Flags for the request
+ * @work: Work item used to complete the request
+ * @result: Result after the request has been completed
+ * @list: Requests are queued using this field
+ *
+ * An arbitrary request over Thunderbolt control channel. For standard
+ * control channel messages, one should use tb_cfg_read/write() and
+ * friends if possible.
+ */
+struct tb_cfg_request {
+	struct kref kref;
+	struct tb_ctl *ctl;
+	const void *request;
+	size_t request_size;
+	enum tb_cfg_pkg_type request_type;
+	void *response;
+	size_t response_size;
+	enum tb_cfg_pkg_type response_type;
+	size_t npackets;
+	bool (*match)(const struct tb_cfg_request *req,
+		      const struct ctl_pkg *pkg);
+	bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
+	void (*callback)(void *callback_data);
+	void *callback_data;
+	unsigned long flags;
+	struct work_struct work;
+	struct tb_cfg_result result;
+	struct list_head list;
+};
+
+#define TB_CFG_REQUEST_ACTIVE		0
+#define TB_CFG_REQUEST_CANCELED		1
+
+struct tb_cfg_request *tb_cfg_request_alloc(void);
+void tb_cfg_request_get(struct tb_cfg_request *req);
+void tb_cfg_request_put(struct tb_cfg_request *req);
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+		   void (*callback)(void *), void *callback_data);
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+			struct tb_cfg_request *req, int timeout_msec);
+
+static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
+{
+	return (u64) header->route_hi << 32 | header->route_lo;
+}
+
+static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
+{
+	struct tb_cfg_header header = {
+		.route_hi = route >> 32,
+		.route_lo = route,
+	};
+	/* check for overflow, route_hi is not 32 bits! */
+	WARN_ON(tb_cfg_get_route(&header) != route);
+	return header;
+}
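+
+/*
+ * Illustrative note: the route string is a plain 64-bit split. For
+ * example route 0x0000000300000102 becomes route_hi = 0x3 and
+ * route_lo = 0x00000102, and tb_cfg_get_route() reassembles the same
+ * value. Because route_hi carries fewer than 32 bits on the wire, the
+ * WARN_ON() above catches routes that cannot be encoded.
+ */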
+
+int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
+		 enum tb_cfg_error error);
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+				  int timeout_msec);
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+				     u64 route, u32 port,
+				     enum tb_cfg_space space, u32 offset,
+				     u32 length, int timeout_msec);
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
+				      u64 route, u32 port,
+				      enum tb_cfg_space space, u32 offset,
+				      u32 length, int timeout_msec);
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+		enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
+		 enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
+
+
+#endif
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
new file mode 100644
index 0000000..f270119
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.c
@@ -0,0 +1,522 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dma_port.h"
+#include "tb_regs.h"
+
+#define DMA_PORT_CAP			0x3e
+
+#define MAIL_DATA			1
+#define MAIL_DATA_DWORDS		16
+
+#define MAIL_IN				17
+#define MAIL_IN_CMD_SHIFT		28
+#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
+#define MAIL_IN_CMD_FLASH_WRITE		0x0
+#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
+#define MAIL_IN_CMD_FLASH_READ		0x2
+#define MAIL_IN_CMD_POWER_CYCLE		0x4
+#define MAIL_IN_DWORDS_SHIFT		24
+#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
+#define MAIL_IN_ADDRESS_SHIFT		2
+#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
+#define MAIL_IN_CSS			BIT(1)
+#define MAIL_IN_OP_REQUEST		BIT(0)
+
+#define MAIL_OUT			18
+#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
+#define MAIL_OUT_STATUS_CMD_SHIFT	4
+#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
+#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
+#define MAIL_OUT_STATUS_COMPLETED	0
+#define MAIL_OUT_STATUS_ERR_AUTH	1
+#define MAIL_OUT_STATUS_ERR_ACCESS	2
+
+#define DMA_PORT_TIMEOUT		5000 /* ms */
+#define DMA_PORT_RETRIES		3
+
+/**
+ * struct tb_dma_port - DMA control port
+ * @sw: Switch the DMA port belongs to
+ * @port: Switch port number where DMA capability is found
+ * @base: Start offset of the mailbox registers
+ * @buf: Temporary buffer to store a single block
+ */
+struct tb_dma_port {
+	struct tb_switch *sw;
+	u8 port;
+	u32 base;
+	u8 *buf;
+};
+
+/*
+ * When the switch is in safe mode it supports very little functionality
+ * so we don't validate that much here.
+ */
+static bool dma_port_match(const struct tb_cfg_request *req,
+			   const struct ctl_pkg *pkg)
+{
+	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+		return true;
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (route != tb_cfg_get_route(req->request))
+		return false;
+	if (pkg->frame.size != req->response_size)
+		return false;
+
+	return true;
+}
+
+static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	memcpy(req->response, pkg->buffer, req->response_size);
+	return true;
+}
+
+static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
+			 u32 port, u32 offset, u32 length, int timeout_msec)
+{
+	struct cfg_read_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.seq = 1,
+			.port = port,
+			.space = TB_CFG_PORT,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct tb_cfg_request *req;
+	struct cfg_write_pkg reply;
+	struct tb_cfg_result res;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = dma_port_match;
+	req->copy = dma_port_copy;
+	req->request = &request;
+	req->request_size = sizeof(request);
+	req->request_type = TB_CFG_PKG_READ;
+	req->response = &reply;
+	req->response_size = 12 + 4 * length;
+	req->response_type = TB_CFG_PKG_READ;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	if (res.err)
+		return res.err;
+
+	memcpy(buffer, &reply.data, 4 * length);
+	return 0;
+}
+
+static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
+			  u32 port, u32 offset, u32 length, int timeout_msec)
+{
+	struct cfg_write_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.seq = 1,
+			.port = port,
+			.space = TB_CFG_PORT,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct tb_cfg_request *req;
+	struct cfg_read_pkg reply;
+	struct tb_cfg_result res;
+
+	memcpy(&request.data, buffer, length * 4);
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = dma_port_match;
+	req->copy = dma_port_copy;
+	req->request = &request;
+	req->request_size = 12 + 4 * length;
+	req->request_type = TB_CFG_PKG_WRITE;
+	req->response = &reply;
+	req->response_size = sizeof(reply);
+	req->response_type = TB_CFG_PKG_WRITE;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res.err;
+}
+
+static int dma_find_port(struct tb_switch *sw)
+{
+	static const int ports[] = { 3, 5, 7 };
+	int i;
+
+	/*
+	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
+	 * controller. Try all of them.
+	 */
+	for (i = 0; i < ARRAY_SIZE(ports); i++) {
+		u32 type;
+		int ret;
+
+		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
+				    2, 1, DMA_PORT_TIMEOUT);
+		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+			return ports[i];
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * dma_port_alloc() - Finds DMA control port from a switch pointed by route
+ * @sw: Switch from which to find the DMA port
+ *
+ * Function checks if the switch NHI port supports DMA configuration
+ * based mailbox capability and if it does, allocates and initializes the
+ * DMA port structure. Returns %NULL if the capability was not found.
+ *
+ * The DMA control port is functional also when the switch is in safe
+ * mode.
+ */
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
+{
+	struct tb_dma_port *dma;
+	int port;
+
+	port = dma_find_port(sw);
+	if (port < 0)
+		return NULL;
+
+	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return NULL;
+
+	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
+	if (!dma->buf) {
+		kfree(dma);
+		return NULL;
+	}
+
+	dma->sw = sw;
+	dma->port = port;
+	dma->base = DMA_PORT_CAP;
+
+	return dma;
+}
+
+/**
+ * dma_port_free() - Release DMA control port structure
+ * @dma: DMA control port
+ */
+void dma_port_free(struct tb_dma_port *dma)
+{
+	if (dma) {
+		kfree(dma->buf);
+		kfree(dma);
+	}
+}
+
+static int dma_port_wait_for_completion(struct tb_dma_port *dma,
+					unsigned int timeout)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout);
+	struct tb_switch *sw = dma->sw;
+
+	do {
+		int ret;
+		u32 in;
+
+		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
+				    dma->base + MAIL_IN, 1, 50);
+		if (ret) {
+			if (ret != -ETIMEDOUT)
+				return ret;
+		} else if (!(in & MAIL_IN_OP_REQUEST)) {
+			return 0;
+		}
+
+		usleep_range(50, 100);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
+static int status_to_errno(u32 status)
+{
+	switch (status & MAIL_OUT_STATUS_MASK) {
+	case MAIL_OUT_STATUS_COMPLETED:
+		return 0;
+	case MAIL_OUT_STATUS_ERR_AUTH:
+		return -EINVAL;
+	case MAIL_OUT_STATUS_ERR_ACCESS:
+		return -EACCES;
+	}
+
+	return -EIO;
+}
+
+static int dma_port_request(struct tb_dma_port *dma, u32 in,
+			    unsigned int timeout)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 out;
+	int ret;
+
+	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
+			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = dma_port_wait_for_completion(dma, timeout);
+	if (ret)
+		return ret;
+
+	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return status_to_errno(out);
+}
+
+static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
+				     void *buf, u32 size)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 in, dwaddress, dwords;
+	int ret;
+
+	dwaddress = address / 4;
+	dwords = size / 4;
+
+	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
+	if (dwords < MAIL_DATA_DWORDS)
+		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+	in |= MAIL_IN_OP_REQUEST;
+
+	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
+			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+}
+
+static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
+				      const void *buf, u32 size)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 in, dwaddress, dwords;
+	int ret;
+
+	dwords = size / 4;
+
+	/* Write the block to MAIL_DATA registers */
+	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
+			    dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+
+	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
+
+	/* CSS header write is always done to the same magic address */
+	if (address >= DMA_PORT_CSS_ADDRESS) {
+		dwaddress = DMA_PORT_CSS_ADDRESS;
+		in |= MAIL_IN_CSS;
+	} else {
+		dwaddress = address / 4;
+	}
+
+	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+}
+
+/**
+ * dma_port_flash_read() - Read from active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of active region
+ * @buf: Buffer where the data is read
+ * @size: Size of the buffer
+ */
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+			void *buf, size_t size)
+{
+	unsigned int retries = DMA_PORT_RETRIES;
+	unsigned int offset;
+
+	offset = address & 3;
+	address = address & ~3;
+
+	do {
+		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+		int ret;
+
+		ret = dma_port_flash_read_block(dma, address, dma->buf,
+						ALIGN(nbytes, 4));
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		memcpy(buf, dma->buf + offset, nbytes);
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
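+
+/*
+ * Illustrative sketch, not part of the driver: a typical user allocates
+ * the DMA port once and then reads the active flash image in chunks.
+ * The parse_image_header() helper is made up.
+ *
+ *	struct tb_dma_port *dma = dma_port_alloc(sw);
+ *
+ *	if (dma) {
+ *		u8 buf[64];
+ *
+ *		if (!dma_port_flash_read(dma, 0, buf, sizeof(buf)))
+ *			parse_image_header(buf);
+ *		dma_port_free(dma);
+ *	}
+ */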
+
+/**
+ * dma_port_flash_write() - Write to non-active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of non-active region
+ * @buf: Data to write
+ * @size: Size of the buffer
+ *
+ * Writes block of data to the non-active flash region of the switch. If
+ * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
+ * using CSS command.
+ */
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+			 const void *buf, size_t size)
+{
+	unsigned int retries = DMA_PORT_RETRIES;
+	unsigned int offset;
+
+	if (address >= DMA_PORT_CSS_ADDRESS) {
+		offset = 0;
+		if (size > DMA_PORT_CSS_MAX_SIZE)
+			return -E2BIG;
+	} else {
+		offset = address & 3;
+		address = address & ~3;
+	}
+
+	do {
+		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+		int ret;
+
+		memcpy(dma->buf + offset, buf, nbytes);
+
+		ret = dma_port_flash_write_block(dma, address, buf, nbytes);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+/**
+ * dma_port_flash_update_auth() - Starts flash authenticate cycle
+ * @dma: DMA control port
+ *
+ * Starts the flash update authentication cycle. If the image in the
+ * non-active area was valid, the switch starts upgrade process where
+ * active and non-active area get swapped in the end. Caller should call
+ * dma_port_flash_update_auth_status() to get status of this command.
+ * This is because if the switch in question is the root switch, the
+ * Thunderbolt host controller gets reset as well.
+ */
+int dma_port_flash_update_auth(struct tb_dma_port *dma)
+{
+	u32 in;
+
+	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, 150);
+}
+
+/**
+ * dma_port_flash_update_auth_status() - Reads status of update auth command
+ * @dma: DMA control port
+ * @status: Status code of the operation
+ *
+ * The function checks if there is status available from the last update
+ * auth command. Returns %0 if there is no status and no further
+ * action is required. If there is status, %1 is returned instead and
+ * @status holds the failure code.
+ *
+ * Negative return means there was an error reading status from the
+ * switch.
+ */
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 out, cmd;
+	int ret;
+
+	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	/* Check if the status relates to flash update auth */
+	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
+	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
+		if (status)
+			*status = out & MAIL_OUT_STATUS_MASK;
+
+		/* Reset is needed in any case */
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * dma_port_power_cycle() - Power cycles the switch
+ * @dma: DMA control port
+ *
+ * Triggers power cycle to the switch.
+ */
+int dma_port_power_cycle(struct tb_dma_port *dma)
+{
+	u32 in;
+
+	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, 150);
+}
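+
+/*
+ * Illustrative note: the intended firmware upgrade flow with this API is
+ * roughly write, authenticate, then check status ("image" and "status"
+ * below are placeholders):
+ *
+ *	dma_port_flash_write(dma, 0, image, image_size);
+ *	dma_port_flash_update_auth(dma);
+ *	ret = dma_port_flash_update_auth_status(dma, &status);
+ *
+ * A positive return with a non-zero @status means the authentication
+ * failed, and dma_port_power_cycle() is available to restart the switch.
+ * The actual sequencing is driven from the NVM upgrade code elsewhere in
+ * the driver, not from this file.
+ */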
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
new file mode 100644
index 0000000..c4a69e0
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.h
@@ -0,0 +1,34 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DMA_PORT_H_
+#define DMA_PORT_H_
+
+#include "tb.h"
+
+struct tb_switch;
+struct tb_dma_port;
+
+#define DMA_PORT_CSS_ADDRESS		0x3fffff
+#define DMA_PORT_CSS_MAX_SIZE		SZ_128
+
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
+void dma_port_free(struct tb_dma_port *dma);
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+			void *buf, size_t size);
+int dma_port_flash_update_auth(struct tb_dma_port *dma);
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+			 const void *buf, size_t size);
+int dma_port_power_cycle(struct tb_dma_port *dma);
+
+#endif
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
new file mode 100644
index 0000000..092381e
--- /dev/null
+++ b/drivers/thunderbolt/domain.c
@@ -0,0 +1,804 @@
+/*
+ * Thunderbolt bus support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(tb_domain_ida);
+
+static bool match_service_id(const struct tb_service_id *id,
+			     const struct tb_service *svc)
+{
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
+		if (strcmp(id->protocol_key, svc->key))
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
+		if (id->protocol_id != svc->prtcid)
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+		if (id->protocol_version != svc->prtcvers)
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
+		if (id->protocol_revision != svc->prtcrevs)
+			return false;
+	}
+
+	return true;
+}
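+
+/*
+ * Illustrative sketch, not part of the driver: service drivers match
+ * against this logic with an id table built from the TBSVC_MATCH_*
+ * flags in <linux/mod_devicetable.h>. The key and id values below are
+ * made up.
+ *
+ *	static const struct tb_service_id example_ids[] = {
+ *		{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
+ *				 TBSVC_MATCH_PROTOCOL_ID,
+ *		  .protocol_key = "example", .protocol_id = 1 },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
+ */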
+
+static const struct tb_service_id *__tb_service_match(struct device *dev,
+						      struct device_driver *drv)
+{
+	struct tb_service_driver *driver;
+	const struct tb_service_id *ids;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return NULL;
+
+	driver = container_of(drv, struct tb_service_driver, driver);
+	if (!driver->id_table)
+		return NULL;
+
+	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
+		if (match_service_id(ids, svc))
+			return ids;
+	}
+
+	return NULL;
+}
+
+static int tb_service_match(struct device *dev, struct device_driver *drv)
+{
+	return !!__tb_service_match(dev, drv);
+}
+
+static int tb_service_probe(struct device *dev)
+{
+	struct tb_service *svc = tb_to_service(dev);
+	struct tb_service_driver *driver;
+	const struct tb_service_id *id;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	id = __tb_service_match(dev, &driver->driver);
+
+	return driver->probe(svc, id);
+}
+
+static int tb_service_remove(struct device *dev)
+{
+	struct tb_service *svc = tb_to_service(dev);
+	struct tb_service_driver *driver;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	if (driver->remove)
+		driver->remove(svc);
+
+	return 0;
+}
+
+static void tb_service_shutdown(struct device *dev)
+{
+	struct tb_service_driver *driver;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc || !dev->driver)
+		return;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	if (driver->shutdown)
+		driver->shutdown(svc);
+}
+
+static const char * const tb_security_names[] = {
+	[TB_SECURITY_NONE] = "none",
+	[TB_SECURITY_USER] = "user",
+	[TB_SECURITY_SECURE] = "secure",
+	[TB_SECURITY_DPONLY] = "dponly",
+	[TB_SECURITY_USBONLY] = "usbonly",
+};
+
+static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+	uuid_t *uuids;
+	ssize_t ret;
+	int i;
+
+	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+	if (!uuids)
+		return -ENOMEM;
+
+	pm_runtime_get_sync(&tb->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
+	if (ret) {
+		mutex_unlock(&tb->lock);
+		goto out;
+	}
+	mutex_unlock(&tb->lock);
+
+	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
+		if (!uuid_is_null(&uuids[i]))
+			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
+					&uuids[i]);
+
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
+			       i < tb->nboot_acl - 1 ? "," : "\n");
+	}
+
+out:
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_put_autosuspend(&tb->dev);
+	kfree(uuids);
+
+	return ret;
+}
+
+static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+	char *str, *s, *uuid_str;
+	ssize_t ret = 0;
+	uuid_t *acl;
+	int i = 0;
+
+	/*
+	 * Make sure the value is not bigger than tb->nboot_acl * UUID
+	 * length + commas and optional "\n". Also the smallest allowable
+	 * string is tb->nboot_acl * ",".
+	 */
+	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
+		return -EINVAL;
+	if (count < tb->nboot_acl - 1)
+		return -EINVAL;
+
+	str = kstrdup(buf, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+
+	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+	if (!acl) {
+		ret = -ENOMEM;
+		goto err_free_str;
+	}
+
+	uuid_str = strim(str);
+	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
+		size_t len = strlen(s);
+
+		if (len) {
+			if (len != UUID_STRING_LEN) {
+				ret = -EINVAL;
+				goto err_free_acl;
+			}
+			ret = uuid_parse(s, &acl[i]);
+			if (ret)
+				goto err_free_acl;
+		}
+
+		i++;
+	}
+
+	if (s || i < tb->nboot_acl) {
+		ret = -EINVAL;
+		goto err_free_acl;
+	}
+
+	pm_runtime_get_sync(&tb->dev);
+
+	if (mutex_lock_interruptible(&tb->lock)) {
+		ret = -ERESTARTSYS;
+		goto err_rpm_put;
+	}
+	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+	if (!ret) {
+		/* Notify userspace about the change */
+		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+	}
+	mutex_unlock(&tb->lock);
+
+err_rpm_put:
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_put_autosuspend(&tb->dev);
+err_free_acl:
+	kfree(acl);
+err_free_str:
+	kfree(str);
+
+	return ret ?: count;
+}
+static DEVICE_ATTR_RW(boot_acl);
+
+static ssize_t security_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+	const char *name = "unknown";
+
+	if (tb->security_level < ARRAY_SIZE(tb_security_names))
+		name = tb_security_names[tb->security_level];
+
+	return sprintf(buf, "%s\n", name);
+}
+static DEVICE_ATTR_RO(security);
+
+static struct attribute *domain_attrs[] = {
+	&dev_attr_boot_acl.attr,
+	&dev_attr_security.attr,
+	NULL,
+};
+
+static umode_t domain_attr_is_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	if (attr == &dev_attr_boot_acl.attr) {
+		if (tb->nboot_acl &&
+		    tb->cm_ops->get_boot_acl &&
+		    tb->cm_ops->set_boot_acl)
+			return attr->mode;
+		return 0;
+	}
+
+	return attr->mode;
+}
+
+static struct attribute_group domain_attr_group = {
+	.is_visible = domain_attr_is_visible,
+	.attrs = domain_attrs,
+};
+
+static const struct attribute_group *domain_attr_groups[] = {
+	&domain_attr_group,
+	NULL,
+};
+
+struct bus_type tb_bus_type = {
+	.name = "thunderbolt",
+	.match = tb_service_match,
+	.probe = tb_service_probe,
+	.remove = tb_service_remove,
+	.shutdown = tb_service_shutdown,
+};
+
+static void tb_domain_release(struct device *dev)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	tb_ctl_free(tb->ctl);
+	destroy_workqueue(tb->wq);
+	ida_simple_remove(&tb_domain_ida, tb->index);
+	mutex_destroy(&tb->lock);
+	kfree(tb);
+}
+
+struct device_type tb_domain_type = {
+	.name = "thunderbolt_domain",
+	.release = tb_domain_release,
+};
+
+/**
+ * tb_domain_alloc() - Allocate a domain
+ * @nhi: Pointer to the host controller
+ * @privsize: Size of the connection manager private data
+ *
+ * Allocates and initializes a new Thunderbolt domain. Connection
+ * managers are expected to call this and then fill in @cm_ops
+ * accordingly.
+ *
+ * Call tb_domain_put() to release the domain before it has been added
+ * to the system.
+ *
+ * Return: allocated domain structure or %NULL in case of error
+ */
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+{
+	struct tb *tb;
+
+	/*
+	 * Make sure the structure sizes match what the hardware expects
+	 * because bit-fields are being used.
+	 */
+	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
+	if (!tb)
+		return NULL;
+
+	tb->nhi = nhi;
+	mutex_init(&tb->lock);
+
+	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+	if (tb->index < 0)
+		goto err_free;
+
+	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
+	if (!tb->wq)
+		goto err_remove_ida;
+
+	tb->dev.parent = &nhi->pdev->dev;
+	tb->dev.bus = &tb_bus_type;
+	tb->dev.type = &tb_domain_type;
+	tb->dev.groups = domain_attr_groups;
+	dev_set_name(&tb->dev, "domain%d", tb->index);
+	device_initialize(&tb->dev);
+
+	return tb;
+
+err_remove_ida:
+	ida_simple_remove(&tb_domain_ida, tb->index);
+err_free:
+	kfree(tb);
+
+	return NULL;
+}
+
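+/*
+ * Illustrative sketch (not part of the driver): a minimal connection
+ * manager would allocate the domain, fill in its operations and then add
+ * it to the system. Here my_cm and my_cm_ops are hypothetical names:
+ *
+ *	struct tb *tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
+ *	if (!tb)
+ *		return NULL;
+ *	tb->cm_ops = &my_cm_ops;
+ *	if (tb_domain_add(tb)) {
+ *		tb_domain_put(tb);
+ *		return NULL;
+ *	}
+ */
+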
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size)
+{
+	struct tb *tb = data;
+
+	if (!tb->cm_ops->handle_event) {
+		tb_warn(tb, "domain does not have event handler\n");
+		return true;
+	}
+
+	switch (type) {
+	case TB_CFG_PKG_XDOMAIN_REQ:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+		return tb_xdomain_handle_request(tb, type, buf, size);
+
+	default:
+		tb->cm_ops->handle_event(tb, type, buf, size);
+	}
+
+	return true;
+}
+
+/**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this function has returned successfully. In order to remove
+ * and release the domain after this function has been called, call
+ * tb_domain_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_domain_add(struct tb *tb)
+{
+	int ret;
+
+	if (WARN_ON(!tb->cm_ops))
+		return -EINVAL;
+
+	mutex_lock(&tb->lock);
+
+	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
+	if (!tb->ctl) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	/*
+	 * tb_schedule_hotplug_handler may be called as soon as the config
+	 * channel is started. That's why we have to hold the lock here.
+	 */
+	tb_ctl_start(tb->ctl);
+
+	if (tb->cm_ops->driver_ready) {
+		ret = tb->cm_ops->driver_ready(tb);
+		if (ret)
+			goto err_ctl_stop;
+	}
+
+	ret = device_add(&tb->dev);
+	if (ret)
+		goto err_ctl_stop;
+
+	/* Start the domain */
+	if (tb->cm_ops->start) {
+		ret = tb->cm_ops->start(tb);
+		if (ret)
+			goto err_domain_del;
+	}
+
+	/* This starts event processing */
+	mutex_unlock(&tb->lock);
+
+	pm_runtime_no_callbacks(&tb->dev);
+	pm_runtime_set_active(&tb->dev);
+	pm_runtime_enable(&tb->dev);
+	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&tb->dev);
+	pm_runtime_use_autosuspend(&tb->dev);
+
+	return 0;
+
+err_domain_del:
+	device_del(&tb->dev);
+err_ctl_stop:
+	tb_ctl_stop(tb->ctl);
+err_unlock:
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_remove() - Removes and releases a domain
+ * @tb: Domain to remove
+ *
+ * Stops the domain, removes it from the system and releases all
+ * resources once the last reference has been released.
+ */
+void tb_domain_remove(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->stop)
+		tb->cm_ops->stop(tb);
+	/* Stop the domain control traffic */
+	tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	flush_workqueue(tb->wq);
+	device_unregister(&tb->dev);
+}
+
+/**
+ * tb_domain_suspend_noirq() - Suspend a domain
+ * @tb: Domain to suspend
+ *
+ * Suspends all devices in the domain and stops the control channel.
+ */
+int tb_domain_suspend_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	/*
+	 * The control channel interrupt is left enabled during suspend
+	 * and taking the lock here prevents any events happening before
+	 * we actually have stopped the domain and the control channel.
+	 */
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->suspend_noirq)
+		ret = tb->cm_ops->suspend_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_resume_noirq() - Resume a domain
+ * @tb: Domain to resume
+ *
+ * Re-starts the control channel, and resumes all devices connected to
+ * the domain.
+ */
+int tb_domain_resume_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->resume_noirq)
+		ret = tb->cm_ops->resume_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_suspend(struct tb *tb)
+{
+	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
+}
+
+void tb_domain_complete(struct tb *tb)
+{
+	if (tb->cm_ops->complete)
+		tb->cm_ops->complete(tb);
+}
+
+int tb_domain_runtime_suspend(struct tb *tb)
+{
+	if (tb->cm_ops->runtime_suspend) {
+		int ret = tb->cm_ops->runtime_suspend(tb);
+		if (ret)
+			return ret;
+	}
+	tb_ctl_stop(tb->ctl);
+	return 0;
+}
+
+int tb_domain_runtime_resume(struct tb *tb)
+{
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->runtime_resume) {
+		int ret = tb->cm_ops->runtime_resume(tb);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * tb_domain_approve_switch() - Approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * This will approve the switch by connection manager specific means. In
+ * case of success the connection manager will create tunnels for all
+ * supported protocols.
+ */
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	if (!tb->cm_ops->approve_switch)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_approve_switch_key() - Approve switch and add key
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function first adds
+ * the key to the switch NVM using connection manager specific means. If
+ * adding the key is successful, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	ret = tb->cm_ops->add_switch_key(tb, sw);
+	if (ret)
+		return ret;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_challenge_switch_key() - Challenge and approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function generates
+ * a random challenge and sends it to the switch. The switch responds to
+ * this and if the response matches our random challenge, the switch is
+ * approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	u8 challenge[TB_SWITCH_KEY_SIZE];
+	u8 response[TB_SWITCH_KEY_SIZE];
+	u8 hmac[TB_SWITCH_KEY_SIZE];
+	struct tb_switch *parent_sw;
+	struct crypto_shash *tfm;
+	struct shash_desc *shash;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	get_random_bytes(challenge, sizeof(challenge));
+	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
+	if (ret)
+		return ret;
+
+	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
+	if (ret)
+		goto err_free_tfm;
+
+	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+			GFP_KERNEL);
+	if (!shash) {
+		ret = -ENOMEM;
+		goto err_free_tfm;
+	}
+
+	shash->tfm = tfm;
+	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	memset(hmac, 0, sizeof(hmac));
+	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
+	if (ret)
+		goto err_free_shash;
+
+	/* The returned HMAC must match the one we calculated */
+	if (memcmp(response, hmac, sizeof(hmac))) {
+		ret = -EKEYREJECTED;
+		goto err_free_shash;
+	}
+
+	crypto_free_shash(tfm);
+	kfree(shash);
+
+	return tb->cm_ops->approve_switch(tb, sw);
+
+err_free_shash:
+	kfree(shash);
+err_free_tfm:
+	crypto_free_shash(tfm);
+
+	return ret;
+}
+
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return: %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+	if (!tb->cm_ops->disconnect_pcie_paths)
+		return -EPERM;
+
+	return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
+/**
+ * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
+ * @tb: Domain enabling the DMA paths
+ * @xd: XDomain DMA paths are created to
+ *
+ * Calls connection manager specific method to enable DMA paths to the
+ * XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!tb->cm_ops->approve_xdomain_paths)
+		return -ENOTSUPP;
+
+	return tb->cm_ops->approve_xdomain_paths(tb, xd);
+}
+
+/**
+ * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
+ * @tb: Domain disabling the DMA paths
+ * @xd: XDomain whose DMA paths are disconnected
+ *
+ * Calls connection manager specific method to disconnect DMA paths to
+ * the XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!tb->cm_ops->disconnect_xdomain_paths)
+		return -ENOTSUPP;
+
+	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+}
+
+static int disconnect_xdomain(struct device *dev, void *data)
+{
+	struct tb_xdomain *xd;
+	struct tb *tb = data;
+	int ret = 0;
+
+	xd = tb_to_xdomain(dev);
+	if (xd && xd->tb == tb)
+		ret = tb_xdomain_disable_paths(xd);
+
+	return ret;
+}
+
+/**
+ * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
+ * @tb: Domain whose paths are disconnected
+ *
+ * This function can be used to disconnect all paths (PCIe, XDomain), for
+ * example in preparation for host NVM firmware upgrade. After this is
+ * called the paths cannot be established without resetting the switch.
+ *
+ * Return: %0 in case of success and negative errno otherwise.
+ */
+int tb_domain_disconnect_all_paths(struct tb *tb)
+{
+	int ret;
+
+	ret = tb_domain_disconnect_pcie_paths(tb);
+	if (ret)
+		return ret;
+
+	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
+}
+
+int tb_domain_init(void)
+{
+	int ret;
+
+	ret = tb_xdomain_init();
+	if (ret)
+		return ret;
+	ret = bus_register(&tb_bus_type);
+	if (ret)
+		tb_xdomain_exit();
+
+	return ret;
+}
+
+void tb_domain_exit(void)
+{
+	bus_unregister(&tb_bus_type);
+	ida_destroy(&tb_domain_ida);
+	tb_switch_exit();
+	tb_xdomain_exit();
+}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
new file mode 100644
index 0000000..3e8caf2
--- /dev/null
+++ b/drivers/thunderbolt/eeprom.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - eeprom access
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include "tb.h"
+
+/**
+ * tb_eeprom_ctl_write() - write control word
+ */
+static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+/**
+ * tb_eeprom_ctl_read() - read control word
+ */
+static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+enum tb_eeprom_transfer {
+	TB_EEPROM_IN,
+	TB_EEPROM_OUT,
+};
+
+/**
+ * tb_eeprom_active - enable rom access
+ *
+ * WARNING: Always disable access after usage. Otherwise the controller will
+ * fail to reprobe.
+ */
+static int tb_eeprom_active(struct tb_switch *sw, bool enable)
+{
+	struct tb_eeprom_ctl ctl;
+	int res = tb_eeprom_ctl_read(sw, &ctl);
+	if (res)
+		return res;
+	if (enable) {
+		ctl.access_high = 1;
+		res = tb_eeprom_ctl_write(sw, &ctl);
+		if (res)
+			return res;
+		ctl.access_low = 0;
+		return tb_eeprom_ctl_write(sw, &ctl);
+	} else {
+		ctl.access_low = 1;
+		res = tb_eeprom_ctl_write(sw, &ctl);
+		if (res)
+			return res;
+		ctl.access_high = 0;
+		return tb_eeprom_ctl_write(sw, &ctl);
+	}
+}
+
+/**
+ * tb_eeprom_transfer - transfer one bit
+ *
+ * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
+ * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
+ */
+static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
+			      enum tb_eeprom_transfer direction)
+{
+	int res;
+	if (direction == TB_EEPROM_OUT) {
+		res = tb_eeprom_ctl_write(sw, ctl);
+		if (res)
+			return res;
+	}
+	ctl->clock = 1;
+	res = tb_eeprom_ctl_write(sw, ctl);
+	if (res)
+		return res;
+	if (direction == TB_EEPROM_IN) {
+		res = tb_eeprom_ctl_read(sw, ctl);
+		if (res)
+			return res;
+	}
+	ctl->clock = 0;
+	return tb_eeprom_ctl_write(sw, ctl);
+}
+
+/**
+ * tb_eeprom_out - write one byte to the bus
+ */
+static int tb_eeprom_out(struct tb_switch *sw, u8 val)
+{
+	struct tb_eeprom_ctl ctl;
+	int i;
+	int res = tb_eeprom_ctl_read(sw, &ctl);
+	if (res)
+		return res;
+	for (i = 0; i < 8; i++) {
+		ctl.data_out = val & 0x80;
+		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
+		if (res)
+			return res;
+		val <<= 1;
+	}
+	return 0;
+}
+
+/**
+ * tb_eeprom_in - read one byte from the bus
+ */
+static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
+{
+	struct tb_eeprom_ctl ctl;
+	int i;
+	int res = tb_eeprom_ctl_read(sw, &ctl);
+	if (res)
+		return res;
+	*val = 0;
+	for (i = 0; i < 8; i++) {
+		*val <<= 1;
+		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
+		if (res)
+			return res;
+		*val |= ctl.data_in;
+	}
+	return 0;
+}
+
+/**
+ * tb_eeprom_read_n - read count bytes from offset into val
+ */
+static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+		size_t count)
+{
+	int i, res;
+	res = tb_eeprom_active(sw, true);
+	if (res)
+		return res;
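+	/*
+	 * Issue the read command: opcode 3 followed by the 16-bit offset,
+	 * high byte first.
+	 */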
+	res = tb_eeprom_out(sw, 3);
+	if (res)
+		return res;
+	res = tb_eeprom_out(sw, offset >> 8);
+	if (res)
+		return res;
+	res = tb_eeprom_out(sw, offset);
+	if (res)
+		return res;
+	for (i = 0; i < count; i++) {
+		res = tb_eeprom_in(sw, val + i);
+		if (res)
+			return res;
+	}
+	return tb_eeprom_active(sw, false);
+}
+
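+/*
+ * CRC-8 as used for the DROM UID: polynomial 0x07 (x^8 + x^2 + x + 1),
+ * initial value 0xff, MSB first, no final XOR.
+ */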
+static u8 tb_crc8(u8 *data, int len)
+{
+	int i, j;
+	u8 val = 0xff;
+	for (i = 0; i < len; i++) {
+		val ^= data[i];
+		for (j = 0; j < 8; j++)
+			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
+	}
+	return val;
+}
+
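+/* CRC-32C (Castagnoli) as used for the DROM data section */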
+static u32 tb_crc32(void *data, size_t len)
+{
+	return ~__crc32c_le(~0, data, len);
+}
+
+#define TB_DROM_DATA_START 13
+struct tb_drom_header {
+	/* BYTE 0 */
+	u8 uid_crc8; /* checksum for uid */
+	/* BYTES 1-8 */
+	u64 uid;
+	/* BYTES 9-12 */
+	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
+	/* BYTE 13 */
+	u8 device_rom_revision; /* should be <= 1 */
+	u16 data_len:10;
+	u8 __unknown1:6;
+	/* BYTES 16-21 */
+	u16 vendor_id;
+	u16 model_id;
+	u8 model_rev;
+	u8 eeprom_rev;
+} __packed;
+
+enum tb_drom_entry_type {
+	/* force unsigned to prevent "one-bit signed bitfield" warning */
+	TB_DROM_ENTRY_GENERIC = 0U,
+	TB_DROM_ENTRY_PORT,
+};
+
+struct tb_drom_entry_header {
+	u8 len;
+	u8 index:6;
+	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
+	enum tb_drom_entry_type type:1;
+} __packed;
+
+struct tb_drom_entry_generic {
+	struct tb_drom_entry_header header;
+	u8 data[0];
+} __packed;
+
+struct tb_drom_entry_port {
+	/* BYTES 0-1 */
+	struct tb_drom_entry_header header;
+	/* BYTE 2 */
+	u8 dual_link_port_rid:4;
+	u8 link_nr:1;
+	u8 unknown1:2;
+	bool has_dual_link_port:1;
+
+	/* BYTE 3 */
+	u8 dual_link_port_nr:6;
+	u8 unknown2:2;
+
+	/* BYTES 4 - 5 TODO decode */
+	u8 micro2:4;
+	u8 micro1:4;
+	u8 micro3;
+
+	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
+	u8 peer_port_rid:4;
+	u8 unknown3:3;
+	bool has_peer_port:1;
+	u8 peer_port_nr:6;
+	u8 unknown4:2;
+} __packed;
+
+
+/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+	struct tb_cap_plug_events cap;
+	int res;
+	if (!sw->cap_plug_events) {
+		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+		return -ENOSYS;
+	}
+	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+			     sizeof(cap) / 4);
+	if (res)
+		return res;
+
+	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+		tb_sw_warn(sw, "no NVM\n");
+		return -ENOSYS;
+	}
+
+	if (cap.drom_offset > 0xffff) {
+		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+				cap.drom_offset);
+		return -ENXIO;
+	}
+	*offset = cap.drom_offset;
+	return 0;
+}
+
+/**
+ * tb_drom_read_uid_only - read uid directly from drom
+ *
+ * Does not use the cached copy in sw->drom. Used during resume to check switch
+ * identity.
+ */
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
+{
+	u8 data[9];
+	u16 drom_offset;
+	u8 crc;
+	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+	if (res)
+		return res;
+
+	if (drom_offset == 0)
+		return -ENODEV;
+
+	/* read uid */
+	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+	if (res)
+		return res;
+
+	crc = tb_crc8(data + 1, 8);
+	if (crc != data[0]) {
+		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
+				data[0], crc);
+		return -EIO;
+	}
+
+	*uid = *(u64 *)(data+1);
+	return 0;
+}
+
+static int tb_drom_parse_entry_generic(struct tb_switch *sw,
+		struct tb_drom_entry_header *header)
+{
+	const struct tb_drom_entry_generic *entry =
+		(const struct tb_drom_entry_generic *)header;
+
+	switch (header->index) {
+	case 1:
+		/* Length includes 2 bytes header so remove it before copy */
+		sw->vendor_name = kstrndup(entry->data,
+			header->len - sizeof(*header), GFP_KERNEL);
+		if (!sw->vendor_name)
+			return -ENOMEM;
+		break;
+
+	case 2:
+		sw->device_name = kstrndup(entry->data,
+			header->len - sizeof(*header), GFP_KERNEL);
+		if (!sw->device_name)
+			return -ENOMEM;
+		break;
+	}
+
+	return 0;
+}
+
+static int tb_drom_parse_entry_port(struct tb_switch *sw,
+				    struct tb_drom_entry_header *header)
+{
+	struct tb_port *port;
+	int res;
+	enum tb_port_type type;
+
+	/*
+	 * Some DROMs list more ports than the controller actually has
+	 * so we skip those but allow the parser to continue.
+	 */
+	if (header->index > sw->config.max_port_number) {
+		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+		return 0;
+	}
+
+	port = &sw->ports[header->index];
+	port->disabled = header->port_disabled;
+	if (port->disabled)
+		return 0;
+
+	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
+	if (res)
+		return res;
+	type &= 0xffffff;
+
+	if (type == TB_TYPE_PORT) {
+		struct tb_drom_entry_port *entry = (void *) header;
+		if (header->len != sizeof(*entry)) {
+			tb_sw_warn(sw,
+				"port entry has size %#x (expected %#zx)\n",
+				header->len, sizeof(struct tb_drom_entry_port));
+			return -EIO;
+		}
+		port->link_nr = entry->link_nr;
+		if (entry->has_dual_link_port)
+			port->dual_link_port =
+				&port->sw->ports[entry->dual_link_port_nr];
+	}
+	return 0;
+}
+
+/**
+ * tb_drom_parse_entries - parse the linked list of drom entries
+ *
+ * Drom must have been copied to sw->drom.
+ */
+static int tb_drom_parse_entries(struct tb_switch *sw)
+{
+	struct tb_drom_header *header = (void *) sw->drom;
+	u16 pos = sizeof(*header);
+	u16 drom_size = header->data_len + TB_DROM_DATA_START;
+	int res;
+
+	while (pos < drom_size) {
+		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
+		if (pos + 1 == drom_size || pos + entry->len > drom_size
+				|| !entry->len) {
+			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
+			return -EIO;
+		}
+
+		switch (entry->type) {
+		case TB_DROM_ENTRY_GENERIC:
+			res = tb_drom_parse_entry_generic(sw, entry);
+			break;
+		case TB_DROM_ENTRY_PORT:
+			res = tb_drom_parse_entry_port(sw, entry);
+			break;
+		}
+		if (res)
+			return res;
+
+		pos += entry->len;
+	}
+	return 0;
+}
+
+/**
+ * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
+ */
+static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
+{
+	struct device *dev = &sw->tb->nhi->pdev->dev;
+	int len, res;
+
+	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+	if (len < 0 || len < sizeof(struct tb_drom_header))
+		return -EINVAL;
+
+	sw->drom = kmalloc(len, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+
+	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
+									len);
+	if (res)
+		goto err;
+
+	*size = ((struct tb_drom_header *)sw->drom)->data_len +
+							  TB_DROM_DATA_START;
+	if (*size > len)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(sw->drom);
+	sw->drom = NULL;
+	return -EINVAL;
+}
+
+static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
+{
+	u32 drom_offset;
+	int ret;
+
+	if (!sw->dma_port)
+		return -ENODEV;
+
+	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
+			 sw->cap_plug_events + 12, 1);
+	if (ret)
+		return ret;
+
+	if (!drom_offset)
+		return -ENODEV;
+
+	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
+				  sizeof(*size));
+	if (ret)
+		return ret;
+
+	/* Size includes CRC8 + UID + CRC32 */
+	*size += 1 + 8 + 4;
+	sw->drom = kzalloc(*size, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+
+	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
+	if (ret)
+		goto err_free;
+
+	/*
+	 * Read UID from the minimal DROM because the one in NVM is just
+	 * a placeholder.
+	 */
+	tb_drom_read_uid_only(sw, &sw->uid);
+	return 0;
+
+err_free:
+	kfree(sw->drom);
+	sw->drom = NULL;
+	return ret;
+}
+
+/**
+ * tb_drom_read - copy drom to sw->drom and parse it
+ */
+int tb_drom_read(struct tb_switch *sw)
+{
+	u16 drom_offset;
+	u16 size;
+	u32 crc;
+	struct tb_drom_header *header;
+	int res;
+	if (sw->drom)
+		return 0;
+
+	if (tb_route(sw) == 0) {
+		/*
+		 * Apple's NHI EFI driver supplies a DROM for the root switch
+		 * in a device property. Use it if available.
+		 */
+		if (tb_drom_copy_efi(sw, &size) == 0)
+			goto parse;
+
+		/* Non-Apple hardware has the DROM as part of NVM */
+		if (tb_drom_copy_nvm(sw, &size) == 0)
+			goto parse;
+
+		/*
+		 * The root switch contains only a dummy drom (header only,
+		 * no entries). Hardcode the configuration here.
+		 */
+		tb_drom_read_uid_only(sw, &sw->uid);
+
+		sw->ports[1].link_nr = 0;
+		sw->ports[2].link_nr = 1;
+		sw->ports[1].dual_link_port = &sw->ports[2];
+		sw->ports[2].dual_link_port = &sw->ports[1];
+
+		sw->ports[3].link_nr = 0;
+		sw->ports[4].link_nr = 1;
+		sw->ports[3].dual_link_port = &sw->ports[4];
+		sw->ports[4].dual_link_port = &sw->ports[3];
+
+		/* Port 5 is inaccessible on this gen 1 controller */
+		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
+			sw->ports[5].disabled = true;
+
+		return 0;
+	}
+
+	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+	if (res)
+		return res;
+
+	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+	if (res)
+		return res;
+	size &= 0x3ff;
+	size += TB_DROM_DATA_START;
+	tb_sw_info(sw, "reading drom (length: %#x)\n", size);
+	if (size < sizeof(*header)) {
+		tb_sw_warn(sw, "drom too small, aborting\n");
+		return -EIO;
+	}
+
+	sw->drom = kzalloc(size, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+	if (res)
+		goto err;
+
+parse:
+	header = (void *) sw->drom;
+
+	if (header->data_len + TB_DROM_DATA_START != size) {
+		tb_sw_warn(sw, "drom size mismatch, aborting\n");
+		goto err;
+	}
+
+	crc = tb_crc8((u8 *) &header->uid, 8);
+	if (crc != header->uid_crc8) {
+		tb_sw_warn(sw,
+			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
+			header->uid_crc8, crc);
+		goto err;
+	}
+	if (!sw->uid)
+		sw->uid = header->uid;
+	sw->vendor = header->vendor_id;
+	sw->device = header->model_id;
+
+	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
+	if (crc != header->data_crc32) {
+		tb_sw_warn(sw,
+			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
+			header->data_crc32, crc);
+	}
+
+	if (header->device_rom_revision > 2)
+		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
+			header->device_rom_revision);
+
+	return tb_drom_parse_entries(sw);
+err:
+	kfree(sw->drom);
+	sw->drom = NULL;
+	return -EIO;
+}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
new file mode 100644
index 0000000..28fc4ce
--- /dev/null
+++ b/drivers/thunderbolt/icm.c
@@ -0,0 +1,2016 @@
+/*
+ * Internal Thunderbolt Connection Manager. This is a firmware running on
+ * the Thunderbolt host controller performing most of the low-level
+ * handling.
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define PCIE2CIO_CMD			0x30
+#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
+#define PCIE2CIO_CMD_START		BIT(30)
+#define PCIE2CIO_CMD_WRITE		BIT(21)
+#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
+#define PCIE2CIO_CMD_CS_SHIFT		19
+#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
+#define PCIE2CIO_CMD_PORT_SHIFT		13
+
+#define PCIE2CIO_WRDATA			0x34
+#define PCIE2CIO_RDDATA			0x38
+
+#define PHY_PORT_CS1			0x37
+#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
+#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
+#define PHY_PORT_CS1_LINK_STATE_SHIFT	26
+
+#define ICM_TIMEOUT			5000	/* ms */
+#define ICM_APPROVE_TIMEOUT		10000	/* ms */
+#define ICM_MAX_LINK			4
+#define ICM_MAX_DEPTH			6
+
+/**
+ * struct icm - Internal connection manager private data
 + * @request_lock: Makes sure only one message is sent to ICM at a time
+ * @rescan_work: Work used to rescan the surviving switches after resume
+ * @upstream_port: Pointer to the PCIe upstream port this host
+ *		   controller is connected to. This is only set for systems
+ *		   where ICM needs to be started manually
+ * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
+ *	     (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
+ * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
+ * @rpm: Does the controller support runtime PM (RTD3)
+ * @is_supported: Checks if we can support ICM on this controller
+ * @get_mode: Read and return the ICM firmware mode (optional)
+ * @get_route: Find a route string for given switch
+ * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
+ * @driver_ready: Send driver ready message to ICM
+ * @device_connected: Handle device connected ICM message
+ * @device_disconnected: Handle device disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
+ */
+struct icm {
+	struct mutex request_lock;
+	struct delayed_work rescan_work;
+	struct pci_dev *upstream_port;
+	size_t max_boot_acl;
+	int vnd_cap;
+	bool safe_mode;
+	bool rpm;
+	bool (*is_supported)(struct tb *tb);
+	int (*get_mode)(struct tb *tb);
+	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+	void (*save_devices)(struct tb *tb);
+	int (*driver_ready)(struct tb *tb,
+			    enum tb_security_level *security_level,
+			    size_t *nboot_acl, bool *rpm);
+	void (*device_connected)(struct tb *tb,
+				 const struct icm_pkg_header *hdr);
+	void (*device_disconnected)(struct tb *tb,
+				    const struct icm_pkg_header *hdr);
+	void (*xdomain_connected)(struct tb *tb,
+				  const struct icm_pkg_header *hdr);
+	void (*xdomain_disconnected)(struct tb *tb,
+				     const struct icm_pkg_header *hdr);
+};
+
+struct icm_notification {
+	struct work_struct work;
+	struct icm_pkg_header *pkg;
+	struct tb *tb;
+};
+
+struct ep_name_entry {
+	u8 len;
+	u8 type;
+	u8 data[0];
+};
+
+#define EP_NAME_INTEL_VSS	0x10
+
+/* Intel Vendor specific structure */
+struct intel_vss {
+	u16 vendor;
+	u16 model;
+	u8 mc;
+	u8 flags;
+	u16 pci_devid;
+	u32 nvm_version;
+};
+
+#define INTEL_VSS_FLAGS_RTD3	BIT(0)
+
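+/* Walk the endpoint name entries and return the Intel VSS entry if present */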
+static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
+{
+	const void *end = ep_name + size;
+
+	while (ep_name < end) {
+		const struct ep_name_entry *ep = ep_name;
+
+		if (!ep->len)
+			break;
+		if (ep_name + ep->len > end)
+			break;
+
+		if (ep->type == EP_NAME_INTEL_VSS)
+			return (const struct intel_vss *)ep->data;
+
+		ep_name += ep->len;
+	}
+
+	return NULL;
+}
+
+static inline struct tb *icm_to_tb(struct icm *icm)
+{
+	return ((void *)icm - sizeof(struct tb));
+}
+
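+/*
+ * Extract the link number of the deepest hop in the route string (one byte
+ * per hop) and convert it to the physical port it belongs to.
+ */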
+static inline u8 phy_port_from_route(u64 route, u8 depth)
+{
+	u8 link;
+
+	link = depth ? route >> ((depth - 1) * 8) : route;
+	return tb_phy_port_from_link(link);
+}
+
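+/*
+ * Links come in pairs (1, 2), (3, 4), ... so return the other link of the
+ * pair, or 0 if the link number itself is 0.
+ */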
+static inline u8 dual_link_from_link(u8 link)
+{
+	return link ? ((link - 1) ^ 0x01) + 1 : 0;
+}
+
+static inline u64 get_route(u32 route_hi, u32 route_lo)
+{
+	return (u64)route_hi << 32 | route_lo;
+}
+
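+/* Strip the deepest hop from the route string to get the parent route */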
+static inline u64 get_parent_route(u64 route)
+{
+	int depth = tb_route_length(route);
+	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
+}
+
+static bool icm_match(const struct tb_cfg_request *req,
+		      const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *res_hdr = pkg->buffer;
+	const struct icm_pkg_header *req_hdr = req->request;
+
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (res_hdr->code != req_hdr->code)
+		return false;
+
+	return true;
+}
+
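+/*
+ * ICM responses may span several packets. Copy each packet to its slot in
+ * the response buffer and report completion once the last one has arrived.
+ */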
+static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *hdr = pkg->buffer;
+
+	if (hdr->packet_id < req->npackets) {
+		size_t offset = hdr->packet_id * req->response_size;
+
+		memcpy(req->response + offset, pkg->buffer, req->response_size);
+	}
+
+	return hdr->packet_id == hdr->total_packets - 1;
+}
+
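+/*
+ * Send a request to the ICM firmware and wait for the response. Requests
+ * are serialized with icm->request_lock and retried a few times if they
+ * time out.
+ */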
+static int icm_request(struct tb *tb, const void *request, size_t request_size,
+		       void *response, size_t response_size, size_t npackets,
+		       unsigned int timeout_msec)
+{
+	struct icm *icm = tb_priv(tb);
+	int retries = 3;
+
+	do {
+		struct tb_cfg_request *req;
+		struct tb_cfg_result res;
+
+		req = tb_cfg_request_alloc();
+		if (!req)
+			return -ENOMEM;
+
+		req->match = icm_match;
+		req->copy = icm_copy;
+		req->request = request;
+		req->request_size = request_size;
+		req->request_type = TB_CFG_PKG_ICM_CMD;
+		req->response = response;
+		req->npackets = npackets;
+		req->response_size = response_size;
+		req->response_type = TB_CFG_PKG_ICM_RESP;
+
+		mutex_lock(&icm->request_lock);
+		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
+		mutex_unlock(&icm->request_lock);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			return res.err == 1 ? -EIO : res.err;
+
+		usleep_range(20, 50);
+	} while (retries--);
+
+	return -ETIMEDOUT;
+}
+
+static bool icm_fr_is_supported(struct tb *tb)
+{
+	return !x86_apple_machine;
+}
+
+static inline int icm_fr_get_switch_index(u32 port)
+{
+	int index;
+
+	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
+		return 0;
+
+	index = port >> ICM_PORT_INDEX_SHIFT;
+	return index != 0xff ? index : 0;
+}
+
+static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_fr_pkg_get_topology_response *switches, *sw;
+	struct icm_fr_pkg_get_topology request = {
+		.hdr = { .code = ICM_GET_TOPOLOGY },
+	};
+	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
+	int ret, index;
+	u8 i;
+
+	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
+	if (!switches)
+		return -ENOMEM;
+
+	ret = icm_request(tb, &request, sizeof(request), switches,
+			  sizeof(*switches), npackets, ICM_TIMEOUT);
+	if (ret)
+		goto err_free;
+
+	sw = &switches[0];
+	index = icm_fr_get_switch_index(sw->ports[link]);
+	if (!index) {
+		ret = -ENODEV;
+		goto err_free;
+	}
+
+	sw = &switches[index];
+	for (i = 1; i < depth; i++) {
+		unsigned int j;
+
+		if (!(sw->first_data & ICM_SWITCH_USED)) {
+			ret = -ENODEV;
+			goto err_free;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
+			index = icm_fr_get_switch_index(sw->ports[j]);
+			if (index > sw->switch_index) {
+				sw = &switches[index];
+				break;
+			}
+		}
+	}
+
+	*route = get_route(sw->route_hi, sw->route_lo);
+
+err_free:
+	kfree(switches);
+	return ret;
+}
+
+static void icm_fr_save_devices(struct tb *tb)
+{
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
+static int
+icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl, bool *rpm)
+{
+	struct icm_fr_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
+
+	return 0;
+}
+
+static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_approve_device request;
+	struct icm_fr_pkg_approve_device reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_APPROVE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+
+	memset(&reply, 0, sizeof(reply));
+	/* Use larger timeout as establishing tunnels can take some time */
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_APPROVE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "PCIe tunnel creation failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_add_device_key request;
+	struct icm_fr_pkg_add_device_key_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_ADD_DEVICE_KEY;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "Adding key to switch failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+				       const u8 *challenge, u8 *response)
+{
+	struct icm_fr_pkg_challenge_device request;
+	struct icm_fr_pkg_challenge_device_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_CHALLENGE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EKEYREJECTED;
+	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+		return -ENOKEY;
+
+	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+	return 0;
+}
+
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct icm_fr_pkg_approve_xdomain_response reply;
+	struct icm_fr_pkg_approve_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_APPROVE_XDOMAIN;
+	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	request.transmit_path = xd->transmit_path;
+	request.transmit_ring = xd->transmit_ring;
+	request.receive_path = xd->receive_path;
+	request.receive_ring = xd->receive_ring;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	u8 phy_port;
+	u8 cmd;
+
+	phy_port = tb_phy_port_from_link(xd->link);
+	if (phy_port == 0)
+		cmd = NHI_MAILBOX_DISCONNECT_PA;
+	else
+		cmd = NHI_MAILBOX_DISCONNECT_PB;
+
+	nhi_mailbox_cmd(tb->nhi, cmd, 1);
+	usleep_range(10, 50);
+	nhi_mailbox_cmd(tb->nhi, cmd, 2);
+	return 0;
+}
+
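+/*
+ * Allocate and register a new switch based on the information received
+ * from the ICM firmware. The parent switch is kept runtime resumed while
+ * the new switch is being added.
+ */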
+static void add_switch(struct tb_switch *parent_sw, u64 route,
+		       const uuid_t *uuid, const u8 *ep_name,
+		       size_t ep_name_size, u8 connection_id, u8 connection_key,
+		       u8 link, u8 depth, enum tb_security_level security_level,
+		       bool authorized, bool boot)
+{
+	const struct intel_vss *vss;
+	struct tb_switch *sw;
+
+	pm_runtime_get_sync(&parent_sw->dev);
+
+	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
+	if (!sw)
+		goto out;
+
+	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+	sw->connection_id = connection_id;
+	sw->connection_key = connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->authorized = authorized;
+	sw->security_level = security_level;
+	sw->boot = boot;
+
+	vss = parse_intel_vss(ep_name, ep_name_size);
+	if (vss)
+		sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+	/* Link the two switches now */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+	if (tb_switch_add(sw)) {
+		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+		tb_switch_put(sw);
+	}
+
+out:
+	pm_runtime_mark_last_busy(&parent_sw->dev);
+	pm_runtime_put_autosuspend(&parent_sw->dev);
+}
+
+static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
+			  u64 route, u8 connection_id, u8 connection_key,
+			  u8 link, u8 depth, bool boot)
+{
+	/* Disconnect from parent */
+	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+	/* Re-connect via updated port */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+
+	/* Update with the new addressing information */
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->connection_id = connection_id;
+	sw->connection_key = connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->boot = boot;
+
+	/* This switch still exists */
+	sw->is_unplugged = false;
+}
+
+static void remove_switch(struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	parent_sw = tb_to_switch(sw->dev.parent);
+	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+	tb_switch_remove(sw);
+}
+
+static void add_xdomain(struct tb_switch *sw, u64 route,
+			const uuid_t *local_uuid, const uuid_t *remote_uuid,
+			u8 link, u8 depth)
+{
+	struct tb_xdomain *xd;
+
+	pm_runtime_get_sync(&sw->dev);
+
+	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
+	if (!xd)
+		goto out;
+
+	xd->link = link;
+	xd->depth = depth;
+
+	tb_port_at(route, sw)->xdomain = xd;
+
+	tb_xdomain_add(xd);
+
+out:
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+}
+
+static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
+{
+	xd->link = link;
+	xd->route = route;
+	xd->is_unplugged = false;
+}
+
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	tb_port_at(xd->route, sw)->xdomain = NULL;
+	tb_xdomain_remove(xd);
+}
+
+static void
+icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_connected *pkg =
+		(const struct icm_fr_event_device_connected *)hdr;
+	enum tb_security_level security_level;
+	struct tb_switch *sw, *parent_sw;
+	struct icm *icm = tb_priv(tb);
+	bool authorized = false;
+	struct tb_xdomain *xd;
+	u8 link, depth;
+	bool boot;
+	u64 route;
+	int ret;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+			 ICM_FLAGS_SLEVEL_SHIFT;
+	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+
+	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
+			link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+	if (sw) {
+		u8 phy_port, sw_phy_port;
+
+		parent_sw = tb_to_switch(sw->dev.parent);
+		sw_phy_port = tb_phy_port_from_link(sw->link);
+		phy_port = tb_phy_port_from_link(link);
+
+		/*
+		 * On resume ICM will send us connected events for the
+		 * devices that are still present. However, that
+		 * information might have changed for example by the
+		 * fact that a switch on a dual-link connection might
+		 * have been enumerated using the other link now. Make
+		 * sure our bookkeeping matches that.
+		 */
+		if (sw->depth == depth && sw_phy_port == phy_port &&
+		    !!sw->authorized == authorized) {
+			/*
+			 * It was enumerated through another link so update
+			 * route string accordingly.
+			 */
+			if (sw->link != link) {
+				ret = icm->get_route(tb, link, depth, &route);
+				if (ret) {
+					tb_err(tb, "failed to update route string for switch at %u.%u\n",
+					       link, depth);
+					tb_switch_put(sw);
+					return;
+				}
+			} else {
+				route = tb_route(sw);
+			}
+
+			update_switch(parent_sw, sw, route, pkg->connection_id,
+				      pkg->connection_key, link, depth, boot);
+			tb_switch_put(sw);
+			return;
+		}
+
+		/*
+		 * User connected the same switch to another physical
+		 * port or to another part of the topology. Remove the
+		 * existing switch now before adding the new one.
+		 */
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/*
+	 * If the switch was not found by UUID, look for a switch on
+	 * same physical port (taking possible link aggregation into
+	 * account) and depth. If we found one it is definitely a stale
+	 * one so remove it first.
+	 */
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		u8 dual_link;
+
+		dual_link = dual_link_from_link(link);
+		if (dual_link)
+			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
+	}
+	if (sw) {
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/* Remove existing XDomain connection if found */
+	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+	if (!parent_sw) {
+		tb_err(tb, "failed to find parent switch for %u.%u\n",
+		       link, depth);
+		return;
+	}
+
+	ret = icm->get_route(tb, link, depth, &route);
+	if (ret) {
+		tb_err(tb, "failed to find route string for switch at %u.%u\n",
+		       link, depth);
+		tb_switch_put(parent_sw);
+		return;
+	}
+
+	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+		   sizeof(pkg->ep_name), pkg->connection_id,
+		   pkg->connection_key, link, depth, security_level,
+		   authorized, boot);
+
+	tb_switch_put(parent_sw);
+}
+
+static void
+icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_disconnected *pkg =
+		(const struct icm_fr_event_device_disconnected *)hdr;
+	struct tb_switch *sw;
+	u8 link, depth;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+
+	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+			depth);
+		return;
+	}
+
+	remove_switch(sw);
+	tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_xdomain_connected *pkg =
+		(const struct icm_fr_event_xdomain_connected *)hdr;
+	struct tb_xdomain *xd;
+	struct tb_switch *sw;
+	u8 link, depth;
+	u64 route;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+
+	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+		return;
+	}
+
+	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+	if (xd) {
+		u8 xd_phy_port, phy_port;
+
+		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
+		phy_port = phy_port_from_route(route, depth);
+
+		if (xd->depth == depth && xd_phy_port == phy_port) {
+			update_xdomain(xd, route, link);
+			tb_xdomain_put(xd);
+			return;
+		}
+
+		/*
+		 * If we find an existing XDomain connection remove it
+		 * now. We need to go through login handshake and
+		 * everything anyway to be able to re-establish the
+		 * connection.
+		 */
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/*
+	 * Check if there already exists an XDomain in the same place
+	 * as the new one and in that case remove it because it is
+	 * most likely another host that got disconnected.
+	 */
+	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+	if (!xd) {
+		u8 dual_link;
+
+		dual_link = dual_link_from_link(link);
+		if (dual_link)
+			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
+							   depth);
+	}
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/*
+	 * If the user disconnected a switch during suspend and
+	 * connected another host to the same port, remove the switch
+	 * first.
+	 */
+	sw = get_switch_at_route(tb->root_switch, route);
+	if (sw)
+		remove_switch(sw);
+
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+			depth);
+		return;
+	}
+
+	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
+		    depth);
+	tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_xdomain_disconnected *pkg =
+		(const struct icm_fr_event_xdomain_disconnected *)hdr;
+	struct tb_xdomain *xd;
+
+	/*
+	 * If the connection is through one or multiple devices, the
+	 * XDomain device is removed along with them so it is fine if we
+	 * cannot find it here.
+	 */
+	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+}
+
+static int
+icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl, bool *rpm)
+{
+	struct icm_tr_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, 20000);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
+	if (nboot_acl)
+		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
+				ICM_TR_INFO_BOOT_ACL_SHIFT;
+	if (rpm)
+		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
+
+	return 0;
+}
+
+static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_tr_pkg_approve_device request;
+	struct icm_tr_pkg_approve_device reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_APPROVE_DEVICE;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_APPROVE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "PCIe tunnel creation failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_tr_pkg_add_device_key_response reply;
+	struct icm_tr_pkg_add_device_key request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_ADD_DEVICE_KEY;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "Adding key to switch failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+				       const u8 *challenge, u8 *response)
+{
+	struct icm_tr_pkg_challenge_device_response reply;
+	struct icm_tr_pkg_challenge_device request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_CHALLENGE_DEVICE;
+	request.route_lo = sw->config.route_lo;
+	request.route_hi = sw->config.route_hi;
+	request.connection_id = sw->connection_id;
+	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EKEYREJECTED;
+	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+		return -ENOKEY;
+
+	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+	return 0;
+}
+
+static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct icm_tr_pkg_approve_xdomain_response reply;
+	struct icm_tr_pkg_approve_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_APPROVE_XDOMAIN;
+	request.route_hi = upper_32_bits(xd->route);
+	request.route_lo = lower_32_bits(xd->route);
+	request.transmit_path = xd->transmit_path;
+	request.transmit_ring = xd->transmit_ring;
+	request.receive_path = xd->receive_path;
+	request.receive_ring = xd->receive_ring;
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
+				    int stage)
+{
+	struct icm_tr_pkg_disconnect_xdomain_response reply;
+	struct icm_tr_pkg_disconnect_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
+	request.stage = stage;
+	request.route_hi = upper_32_bits(xd->route);
+	request.route_lo = lower_32_bits(xd->route);
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	int ret;
+
+	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
+	if (ret)
+		return ret;
+
+	usleep_range(10, 50);
+	return icm_tr_xdomain_tear_down(tb, xd, 2);
+}
+
+static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_device_connected *pkg =
+		(const struct icm_tr_event_device_connected *)hdr;
+	enum tb_security_level security_level;
+	struct tb_switch *sw, *parent_sw;
+	struct tb_xdomain *xd;
+	bool authorized, boot;
+	u64 route;
+
+	/*
+	 * Currently we don't use the QoS information coming with the
+	 * device connected message so we simply ignore that extra
+	 * packet for now.
+	 */
+	if (pkg->hdr.packet_id)
+		return;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+			 ICM_FLAGS_SLEVEL_SHIFT;
+	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+
+	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
+			route);
+		return;
+	}
+
+	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+	if (sw) {
+		/* Update the switch if it is still in the same place */
+		if (tb_route(sw) == route && !!sw->authorized == authorized) {
+			parent_sw = tb_to_switch(sw->dev.parent);
+			update_switch(parent_sw, sw, route, pkg->connection_id,
+				      0, 0, 0, boot);
+			tb_switch_put(sw);
+			return;
+		}
+
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/* Another switch with the same address */
+	sw = tb_switch_find_by_route(tb, route);
+	if (sw) {
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/* XDomain connection with the same address */
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
+	if (!parent_sw) {
+		tb_err(tb, "failed to find parent switch for %llx\n", route);
+		return;
+	}
+
+	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+		   sizeof(pkg->ep_name), pkg->connection_id,
+		   0, 0, 0, security_level, authorized, boot);
+
+	tb_switch_put(parent_sw);
+}
+
+static void
+icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_device_disconnected *pkg =
+		(const struct icm_tr_event_device_disconnected *)hdr;
+	struct tb_switch *sw;
+	u64 route;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+
+	sw = tb_switch_find_by_route(tb, route);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+		return;
+	}
+
+	remove_switch(sw);
+	tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_xdomain_connected *pkg =
+		(const struct icm_tr_event_xdomain_connected *)hdr;
+	struct tb_xdomain *xd;
+	struct tb_switch *sw;
+	u64 route;
+
+	if (!tb->root_switch)
+		return;
+
+	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+	if (xd) {
+		if (xd->route == route) {
+			update_xdomain(xd, route, 0);
+			tb_xdomain_put(xd);
+			return;
+		}
+
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/* An existing xdomain with the same address */
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
+	/*
+	 * If the user disconnected a switch during suspend and
+	 * connected another host to the same port, remove the switch
+	 * first.
+	 */
+	sw = get_switch_at_route(tb->root_switch, route);
+	if (sw)
+		remove_switch(sw);
+
+	sw = tb_switch_find_by_route(tb, get_parent_route(route));
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+		return;
+	}
+
+	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
+	tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_tr_event_xdomain_disconnected *pkg =
+		(const struct icm_tr_event_xdomain_disconnected *)hdr;
+	struct tb_xdomain *xd;
+	u64 route;
+
+	route = get_route(pkg->route_hi, pkg->route_lo);
+
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+}
+
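+/*
+ * Walk up the PCI hierarchy to the closest PCIe upstream port and
+ * return it only if it is one of the known Alpine Ridge bridges, whose
+ * vendor specific capability (if present) is used for the PCIe2CIO
+ * reset path below.
+ */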
+static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
+{
+	struct pci_dev *parent;
+
+	parent = pci_upstream_bridge(pdev);
+	while (parent) {
+		if (!pci_is_pcie(parent))
+			return NULL;
+		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		parent = pci_upstream_bridge(parent);
+	}
+
+	if (!parent)
+		return NULL;
+
+	switch (parent->device) {
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+		return parent;
+	}
+
+	return NULL;
+}
+
+static bool icm_ar_is_supported(struct tb *tb)
+{
+	struct pci_dev *upstream_port;
+	struct icm *icm = tb_priv(tb);
+
+	/*
+	 * Starting from Alpine Ridge we can use ICM on Apple machines
+	 * as well. We just need to reset and re-enable it first.
+	 */
+	if (!x86_apple_machine)
+		return true;
+
+	/*
+	 * Find the upstream PCIe port in case we need to perform a reset
+	 * through its vendor specific registers.
+	 */
+	upstream_port = get_upstream_port(tb->nhi->pdev);
+	if (upstream_port) {
+		int cap;
+
+		cap = pci_find_ext_capability(upstream_port,
+					      PCI_EXT_CAP_ID_VNDR);
+		if (cap > 0) {
+			icm->upstream_port = upstream_port;
+			icm->vnd_cap = cap;
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int icm_ar_get_mode(struct tb *tb)
+{
+	struct tb_nhi *nhi = tb->nhi;
+	int retries = 60;
+	u32 val;
+
+	do {
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			break;
+		msleep(50);
+	} while (--retries);
+
+	if (!retries) {
+		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
+		return -ENODEV;
+	}
+
+	return nhi_mailbox_mode(nhi);
+}
+
+static int
+icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		    size_t *nboot_acl, bool *rpm)
+{
+	struct icm_ar_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
+	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
+		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
+				ICM_AR_INFO_BOOT_ACL_SHIFT;
+	if (rpm)
+		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
+
+	return 0;
+}
+
+static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_ar_pkg_get_route_response reply;
+	struct icm_ar_pkg_get_route request = {
+		.hdr = { .code = ICM_GET_ROUTE },
+		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	*route = get_route(reply.route_hi, reply.route_lo);
+	return 0;
+}
+
+static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
+{
+	struct icm_ar_pkg_preboot_acl_response reply;
+	struct icm_ar_pkg_preboot_acl request = {
+		.hdr = { .code = ICM_PREBOOT_ACL },
+	};
+	int ret, i;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	for (i = 0; i < nuuids; i++) {
+		u32 *uuid = (u32 *)&uuids[i];
+
+		uuid[0] = reply.acl[i].uuid_lo;
+		uuid[1] = reply.acl[i].uuid_hi;
+
+		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
+			/* Map empty entries to null UUID */
+			uuid[0] = 0;
+			uuid[1] = 0;
+		} else if (uuid[0] != 0 || uuid[1] != 0) {
+			/* Upper two DWs are always all ones */
+			uuid[2] = 0xffffffff;
+			uuid[3] = 0xffffffff;
+		}
+	}
+
+	return ret;
+}
+
+static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
+			       size_t nuuids)
+{
+	struct icm_ar_pkg_preboot_acl_response reply;
+	struct icm_ar_pkg_preboot_acl request = {
+		.hdr = {
+			.code = ICM_PREBOOT_ACL,
+			.flags = ICM_FLAGS_WRITE,
+		},
+	};
+	int ret, i;
+
+	for (i = 0; i < nuuids; i++) {
+		const u32 *uuid = (const u32 *)&uuids[i];
+
+		if (uuid_is_null(&uuids[i])) {
+			/*
+			 * Map null UUID to the empty (all ones) entries
+			 * for ICM.
+			 */
+			request.acl[i].uuid_lo = 0xffffffff;
+			request.acl[i].uuid_hi = 0xffffffff;
+		} else {
+			/* Two high DWs need to be set to all ones */
+			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
+				return -EINVAL;
+
+			request.acl[i].uuid_lo = uuid[0];
+			request.acl[i].uuid_hi = uuid[1];
+		}
+	}
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static void icm_handle_notification(struct work_struct *work)
+{
+	struct icm_notification *n = container_of(work, typeof(*n), work);
+	struct tb *tb = n->tb;
+	struct icm *icm = tb_priv(tb);
+
+	mutex_lock(&tb->lock);
+
+	/*
+	 * When the domain is stopped we flush its workqueue but before
+	 * that the root switch is removed. In that case we should treat
+	 * the queued events as being canceled.
+	 */
+	if (tb->root_switch) {
+		switch (n->pkg->code) {
+		case ICM_EVENT_DEVICE_CONNECTED:
+			icm->device_connected(tb, n->pkg);
+			break;
+		case ICM_EVENT_DEVICE_DISCONNECTED:
+			icm->device_disconnected(tb, n->pkg);
+			break;
+		case ICM_EVENT_XDOMAIN_CONNECTED:
+			icm->xdomain_connected(tb, n->pkg);
+			break;
+		case ICM_EVENT_XDOMAIN_DISCONNECTED:
+			icm->xdomain_disconnected(tb, n->pkg);
+			break;
+		}
+	}
+
+	mutex_unlock(&tb->lock);
+
+	kfree(n->pkg);
+	kfree(n);
+}
+
+static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+			     const void *buf, size_t size)
+{
+	struct icm_notification *n;
+
+	n = kmalloc(sizeof(*n), GFP_KERNEL);
+	if (!n)
+		return;
+
+	INIT_WORK(&n->work, icm_handle_notification);
+	n->pkg = kmemdup(buf, size, GFP_KERNEL);
+	if (!n->pkg) {
+		kfree(n);
+		return;
+	}
+	n->tb = tb;
+
+	queue_work(tb->wq, &n->work);
+}
+
+static int
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+		   size_t *nboot_acl, bool *rpm)
+{
+	struct icm *icm = tb_priv(tb);
+	unsigned int retries = 50;
+	int ret;
+
+	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
+	if (ret) {
+		tb_err(tb, "failed to send driver ready to ICM\n");
+		return ret;
+	}
+
+	/*
+	 * Hold on here until the switch config space is accessible so
+	 * that we can read root switch config successfully.
+	 */
+	do {
+		struct tb_cfg_result res;
+		u32 tmp;
+
+		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
+				      0, 1, 100);
+		if (!res.err)
+			return 0;
+
+		msleep(50);
+	} while (--retries);
+
+	tb_err(tb, "failed to read root switch config space, giving up\n");
+	return -ETIMEDOUT;
+}
+
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+	u32 cmd;
+
+	do {
+		pci_read_config_dword(icm->upstream_port,
+				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+		if (!(cmd & PCIE2CIO_CMD_START)) {
+			if (cmd & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return 0;
+		}
+
+		msleep(50);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
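+/*
+ * PCIe2CIO tunnels config space accesses through vendor specific
+ * registers of the upstream bridge: program the command register with
+ * the target port, config space and dword index, set the START bit and
+ * then poll for completion before touching the data register.
+ */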
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+			 unsigned int port, unsigned int index, u32 *data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int ret, vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	ret = pci2cio_wait_completion(icm, 5000);
+	if (ret)
+		return ret;
+
+	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+	return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+			  unsigned int port, unsigned int index, u32 data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	return pci2cio_wait_completion(icm, 5000);
+}
+
+static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 val;
+
+	if (!icm->upstream_port)
+		return -ENODEV;
+
+	/* Put ARC to wait for CIO reset event to happen */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_CIO_RESET_REQ;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Re-start ARC */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_ICM_EN_INVERT;
+	val |= REG_FW_STS_ICM_EN_CPU;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Trigger CIO reset now */
+	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
+static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
+{
+	unsigned int retries = 10;
+	int ret;
+	u32 val;
+
+	/* Check if the ICM firmware is already running */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	if (val & REG_FW_STS_ICM_EN)
+		return 0;
+
+	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+
+	ret = icm_firmware_reset(tb, nhi);
+	if (ret)
+		return ret;
+
+	/* Wait until the ICM firmware tells us it is up and running */
+	do {
+		/* Check that the ICM firmware is running */
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			return 0;
+
+		msleep(300);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int icm_reset_phy_port(struct tb *tb, int phy_port)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 state0, state1;
+	int port0, port1;
+	u32 val0, val1;
+	int ret;
+
+	if (!icm->upstream_port)
+		return 0;
+
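+	/*
+	 * Each physical port is backed by two null ports (lanes): ports
+	 * 1 and 2 for physical port 0, ports 3 and 4 for physical port 1.
+	 */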
+	if (phy_port) {
+		port0 = 3;
+		port1 = 4;
+	} else {
+		port0 = 1;
+		port1 = 2;
+	}
+
+	/*
+	 * Read link status of both null ports belonging to a single
+	 * physical port.
+	 */
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+
+	/* If they are both up we need to reset them now */
+	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
+		return 0;
+
+	val0 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+	if (ret)
+		return ret;
+
+	/* Wait a bit and then re-enable both ports */
+	usleep_range(10, 100);
+
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+}
+
+static int icm_firmware_init(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = icm_firmware_start(tb, nhi);
+	if (ret) {
+		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
+		return ret;
+	}
+
+	if (icm->get_mode) {
+		ret = icm->get_mode(tb);
+
+		switch (ret) {
+		case NHI_FW_SAFE_MODE:
+			icm->safe_mode = true;
+			break;
+
+		case NHI_FW_CM_MODE:
+			/* Ask ICM to accept all Thunderbolt devices */
+			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
+			break;
+
+		default:
+			if (ret < 0)
+				return ret;
+
+			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+			return -ENODEV;
+		}
+	}
+
+	/*
+	 * Reset both physical ports if there is anything connected to
+	 * them already.
+	 */
+	ret = icm_reset_phy_port(tb, 0);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
+	ret = icm_reset_phy_port(tb, 1);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
+
+	return 0;
+}
+
+static int icm_driver_ready(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	int ret;
+
+	ret = icm_firmware_init(tb);
+	if (ret)
+		return ret;
+
+	if (icm->safe_mode) {
+		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+		return 0;
+	}
+
+	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
+				 &icm->rpm);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure the number of supported preboot ACL matches what we
+	 * expect or disable the whole feature.
+	 */
+	if (tb->nboot_acl > icm->max_boot_acl)
+		tb->nboot_acl = 0;
+
+	return 0;
+}
+
+static int icm_suspend(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (icm->save_devices)
+		icm->save_devices(tb);
+
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+	return 0;
+}
+
+/*
+ * Mark all switches (except root switch) below this one unplugged. ICM
+ * firmware will send us an updated list of switches after we have sent
+ * it the driver ready command. If a switch is not in that list it will
+ * be removed when we perform the rescan.
+ */
+static void icm_unplug_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	if (tb_route(sw))
+		sw->is_unplugged = true;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->xdomain) {
+			port->xdomain->is_unplugged = true;
+			continue;
+		}
+		if (!port->remote)
+			continue;
+
+		icm_unplug_children(port->remote->sw);
+	}
+}
+
+static void icm_free_unplugged_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+
+		if (port->xdomain && port->xdomain->is_unplugged) {
+			tb_xdomain_remove(port->xdomain);
+			port->xdomain = NULL;
+			continue;
+		}
+
+		if (!port->remote)
+			continue;
+
+		if (port->remote->sw->is_unplugged) {
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else {
+			icm_free_unplugged_children(port->remote->sw);
+		}
+	}
+}
+
+static void icm_rescan_work(struct work_struct *work)
+{
+	struct icm *icm = container_of(work, struct icm, rescan_work.work);
+	struct tb *tb = icm_to_tb(icm);
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch)
+		icm_free_unplugged_children(tb->root_switch);
+	mutex_unlock(&tb->lock);
+}
+
+static void icm_complete(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (tb->nhi->going_away)
+		return;
+
+	icm_unplug_children(tb->root_switch);
+
+	/*
+	 * Now all existing children should be resumed; start events
+	 * from ICM to get updated status.
+	 */
+	__icm_driver_ready(tb, NULL, NULL, NULL);
+
+	/*
+	 * We do not get notifications of devices that have been
+	 * unplugged during suspend, so schedule a rescan to clean up any
+	 * that remain.
+	 */
+	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
+}
+
+static int icm_runtime_suspend(struct tb *tb)
+{
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+	return 0;
+}
+
+static int icm_runtime_resume(struct tb *tb)
+{
+	/*
+	 * We can reuse the same resume functionality as with system
+	 * suspend.
+	 */
+	icm_complete(tb);
+	return 0;
+}
+
+static int icm_start(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	int ret;
+
+	if (icm->safe_mode)
+		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+	else
+		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (!tb->root_switch)
+		return -ENODEV;
+
+	/*
+	 * NVM upgrade has not been tested on Apple systems and they
+	 * don't provide images publicly either. To be on the safe side
+	 * prevent root switch NVM upgrade on Macs for now.
+	 */
+	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+	tb->root_switch->rpm = icm->rpm;
+
+	ret = tb_switch_add(tb->root_switch);
+	if (ret) {
+		tb_switch_put(tb->root_switch);
+		tb->root_switch = NULL;
+	}
+
+	return ret;
+}
+
+static void icm_stop(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	cancel_delayed_work(&icm->rescan_work);
+	tb_switch_remove(tb->root_switch);
+	tb->root_switch = NULL;
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+}
+
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
+/* Falcon Ridge */
+static const struct tb_cm_ops icm_fr_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.handle_event = icm_handle_event,
+	.approve_switch = icm_fr_approve_switch,
+	.add_switch_key = icm_fr_add_switch_key,
+	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
+};
+
+/* Alpine Ridge */
+static const struct tb_cm_ops icm_ar_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.runtime_suspend = icm_runtime_suspend,
+	.runtime_resume = icm_runtime_resume,
+	.handle_event = icm_handle_event,
+	.get_boot_acl = icm_ar_get_boot_acl,
+	.set_boot_acl = icm_ar_set_boot_acl,
+	.approve_switch = icm_fr_approve_switch,
+	.add_switch_key = icm_fr_add_switch_key,
+	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
+};
+
+/* Titan Ridge */
+static const struct tb_cm_ops icm_tr_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.runtime_suspend = icm_runtime_suspend,
+	.runtime_resume = icm_runtime_resume,
+	.handle_event = icm_handle_event,
+	.get_boot_acl = icm_ar_get_boot_acl,
+	.set_boot_acl = icm_ar_set_boot_acl,
+	.approve_switch = icm_tr_approve_switch,
+	.add_switch_key = icm_tr_add_switch_key,
+	.challenge_switch_key = icm_tr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
+struct tb *icm_probe(struct tb_nhi *nhi)
+{
+	struct icm *icm;
+	struct tb *tb;
+
+	tb = tb_domain_alloc(nhi, sizeof(struct icm));
+	if (!tb)
+		return NULL;
+
+	icm = tb_priv(tb);
+	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
+	mutex_init(&icm->request_lock);
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		icm->is_supported = icm_fr_is_supported;
+		icm->get_route = icm_fr_get_route;
+		icm->save_devices = icm_fr_save_devices;
+		icm->driver_ready = icm_fr_driver_ready;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		icm->xdomain_connected = icm_fr_xdomain_connected;
+		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
+		tb->cm_ops = &icm_fr_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+		icm->is_supported = icm_ar_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->get_route = icm_ar_get_route;
+		icm->save_devices = icm_fr_save_devices;
+		icm->driver_ready = icm_ar_driver_ready;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		icm->xdomain_connected = icm_fr_xdomain_connected;
+		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
+		tb->cm_ops = &icm_ar_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
+		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+		icm->is_supported = icm_ar_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->driver_ready = icm_tr_driver_ready;
+		icm->device_connected = icm_tr_device_connected;
+		icm->device_disconnected = icm_tr_device_disconnected;
+		icm->xdomain_connected = icm_tr_xdomain_connected;
+		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+		tb->cm_ops = &icm_tr_ops;
+		break;
+	}
+
+	if (!icm->is_supported || !icm->is_supported(tb)) {
+		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
+		tb_domain_put(tb);
+		return NULL;
+	}
+
+	return tb;
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644
index 0000000..5cd6bdf
--- /dev/null
+++ b/drivers/thunderbolt/nhi.c
@@ -0,0 +1,1195 @@
+/*
+ * Thunderbolt Cactus Ridge driver - NHI driver
+ *
+ * The NHI (native host interface) is the PCI device that allows us to send and
+ * receive frames from the Thunderbolt bus.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+
+/*
+ * Used to enable end-to-end workaround for missing RX packets. Do not
+ * use this ring for anything else.
+ */
+#define RING_E2E_UNUSED_HOPID	2
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define RING_FIRST_USABLE_HOPID	8
+
+/*
+ * Minimum number of vectors when we use MSI-X. Two for the control
+ * channel Rx/Tx and the remaining four for cross domain DMA paths.
+ */
+#define MSIX_MIN_VECS		6
+#define MSIX_MAX_VECS		16
+
+#define NHI_MAILBOX_TIMEOUT	500 /* ms */
+
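+/*
+ * TX rings use interrupt status bits 0 .. hop_count - 1 and RX rings
+ * use bits hop_count .. 2 * hop_count - 1 of the ring interrupt
+ * registers.
+ */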
+static int ring_interrupt_index(struct tb_ring *ring)
+{
+	int bit = ring->hop;
+	if (!ring->is_tx)
+		bit += ring->nhi->hop_count;
+	return bit;
+}
+
+/**
+ * ring_interrupt_active() - activate/deactivate interrupts for a single ring
+ *
+ * ring->nhi->lock must be held.
+ */
+static void ring_interrupt_active(struct tb_ring *ring, bool active)
+{
+	int reg = REG_RING_INTERRUPT_BASE +
+		  ring_interrupt_index(ring) / 32 * 4;
+	int bit = ring_interrupt_index(ring) & 31;
+	int mask = 1 << bit;
+	u32 old, new;
+
+	if (ring->irq > 0) {
+		u32 step, shift, ivr, misc;
+		void __iomem *ivr_base;
+		int index;
+
+		if (ring->is_tx)
+			index = ring->hop;
+		else
+			index = ring->hop + ring->nhi->hop_count;
+
+		/*
+		 * Ask the hardware to clear interrupt status bits automatically
+		 * since we already know which interrupt was triggered.
+		 */
+		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+		}
+
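+		/*
+		 * Route this ring to its MSI-X vector: each ring has a
+		 * 4-bit field (REG_INT_VEC_ALLOC_BITS) in the interrupt
+		 * vector allocation registers, eight fields packed per
+		 * 32-bit register.
+		 */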
+		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		ivr = ioread32(ivr_base + step);
+		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+		if (active)
+			ivr |= ring->vector << shift;
+		iowrite32(ivr, ivr_base + step);
+	}
+
+	old = ioread32(ring->nhi->iobase + reg);
+	if (active)
+		new = old | mask;
+	else
+		new = old & ~mask;
+
+	dev_info(&ring->nhi->pdev->dev,
+		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+		 active ? "enabling" : "disabling", reg, bit, old, new);
+
+	if (new == old)
+		dev_WARN(&ring->nhi->pdev->dev,
+					 "interrupt for %s %d is already %s\n",
+					 RING_TYPE(ring), ring->hop,
+					 active ? "enabled" : "disabled");
+	iowrite32(new, ring->nhi->iobase + reg);
+}
+
+/**
+ * nhi_disable_interrupts() - disable interrupts for all rings
+ *
+ * Use only during init and shutdown.
+ */
+static void nhi_disable_interrupts(struct tb_nhi *nhi)
+{
+	int i = 0;
+	/* disable interrupts */
+	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
+		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+
+	/* clear interrupt status bits */
+	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
+		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+}
+
+/* ring helper methods */
+
+static void __iomem *ring_desc_base(struct tb_ring *ring)
+{
+	void __iomem *io = ring->nhi->iobase;
+	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
+	io += ring->hop * 16;
+	return io;
+}
+
+static void __iomem *ring_options_base(struct tb_ring *ring)
+{
+	void __iomem *io = ring->nhi->iobase;
+	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
+	io += ring->hop * 32;
+	return io;
+}
+
+static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+	iowrite16(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+	iowrite32(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
+{
+	iowrite32(value, ring_desc_base(ring) + offset);
+	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
+}
+
+static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
+{
+	iowrite32(value, ring_options_base(ring) + offset);
+}
+
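+/*
+ * Standard ring buffer convention: one slot is always left unused so
+ * that a full ring ((head + 1) % size == tail) can be told apart from
+ * an empty one (head == tail).
+ */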
+static bool ring_full(struct tb_ring *ring)
+{
+	return ((ring->head + 1) % ring->size) == ring->tail;
+}
+
+static bool ring_empty(struct tb_ring *ring)
+{
+	return ring->head == ring->tail;
+}
+
+/**
+ * ring_write_descriptors() - post frames from ring->queue to the controller
+ *
+ * ring->lock is held.
+ */
+static void ring_write_descriptors(struct tb_ring *ring)
+{
+	struct ring_frame *frame, *n;
+	struct ring_desc *descriptor;
+	list_for_each_entry_safe(frame, n, &ring->queue, list) {
+		if (ring_full(ring))
+			break;
+		list_move_tail(&frame->list, &ring->in_flight);
+		descriptor = &ring->descriptors[ring->head];
+		descriptor->phys = frame->buffer_phy;
+		descriptor->time = 0;
+		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
+		if (ring->is_tx) {
+			descriptor->length = frame->size;
+			descriptor->eof = frame->eof;
+			descriptor->sof = frame->sof;
+		}
+		ring->head = (ring->head + 1) % ring->size;
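+		/*
+		 * Let the hardware know about the new producer index. Per
+		 * the layout in nhi_regs.h the head index is at offset 10
+		 * for TX rings and offset 8 for RX rings.
+		 */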
+		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+	}
+}
+
+/**
+ * ring_work() - progress completed frames
+ *
+ * If the ring is shutting down then all frames are marked as canceled and
+ * their callbacks are invoked.
+ *
+ * Otherwise we collect all completed frames from the ring buffer, write new
+ * frames to the ring buffer and invoke the callbacks for the completed frames.
+ */
+static void ring_work(struct work_struct *work)
+{
+	struct tb_ring *ring = container_of(work, typeof(*ring), work);
+	struct ring_frame *frame;
+	bool canceled = false;
+	unsigned long flags;
+	LIST_HEAD(done);
+
+	spin_lock_irqsave(&ring->lock, flags);
+
+	if (!ring->running) {
+		/*  Move all frames to done and mark them as canceled. */
+		list_splice_tail_init(&ring->in_flight, &done);
+		list_splice_tail_init(&ring->queue, &done);
+		canceled = true;
+		goto invoke_callback;
+	}
+
+	while (!ring_empty(ring)) {
+		if (!(ring->descriptors[ring->tail].flags
+				& RING_DESC_COMPLETED))
+			break;
+		frame = list_first_entry(&ring->in_flight, typeof(*frame),
+					 list);
+		list_move_tail(&frame->list, &done);
+		if (!ring->is_tx) {
+			frame->size = ring->descriptors[ring->tail].length;
+			frame->eof = ring->descriptors[ring->tail].eof;
+			frame->sof = ring->descriptors[ring->tail].sof;
+			frame->flags = ring->descriptors[ring->tail].flags;
+		}
+		ring->tail = (ring->tail + 1) % ring->size;
+	}
+	ring_write_descriptors(ring);
+
+invoke_callback:
+	/* allow callbacks to schedule new work */
+	spin_unlock_irqrestore(&ring->lock, flags);
+	while (!list_empty(&done)) {
+		frame = list_first_entry(&done, typeof(*frame), list);
+		/*
+		 * The callback may reenqueue or delete frame.
+		 * Do not hold on to it.
+		 */
+		list_del_init(&frame->list);
+		if (frame->callback)
+			frame->callback(ring, frame, canceled);
+	}
+}
+
+int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (ring->running) {
+		list_add_tail(&frame->list, &ring->queue);
+		ring_write_descriptors(ring);
+	} else {
+		ret = -ESHUTDOWN;
+	}
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
+
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called after the @start_poll callback of the
+ * @ring has been invoked. It will read one completed frame from the
+ * ring and return it to the caller. Returns %NULL if there are no more
+ * completed frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+	struct ring_frame *frame = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (!ring->running)
+		goto unlock;
+	if (ring_empty(ring))
+		goto unlock;
+
+	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+		frame = list_first_entry(&ring->in_flight, typeof(*frame),
+					 list);
+		list_del_init(&frame->list);
+
+		if (!ring->is_tx) {
+			frame->size = ring->descriptors[ring->tail].length;
+			frame->eof = ring->descriptors[ring->tail].eof;
+			frame->sof = ring->descriptors[ring->tail].sof;
+			frame->flags = ring->descriptors[ring->tail].flags;
+		}
+
+		ring->tail = (ring->tail + 1) % ring->size;
+	}
+
+unlock:
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+	int idx = ring_interrupt_index(ring);
+	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+	int bit = idx % 32;
+	u32 val;
+
+	val = ioread32(ring->nhi->iobase + reg);
+	if (mask)
+		val &= ~BIT(bit);
+	else
+		val |= BIT(bit);
+	iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+	if (!ring->running)
+		return;
+
+	if (ring->start_poll) {
+		__ring_interrupt_mask(ring, true);
+		ring->start_poll(ring->poll_data);
+	} else {
+		schedule_work(&ring->work);
+	}
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->nhi->lock, flags);
+	spin_lock(&ring->lock);
+	if (ring->start_poll)
+		__ring_interrupt_mask(ring, false);
+	spin_unlock(&ring->lock);
+	spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+
+static irqreturn_t ring_msix(int irq, void *data)
+{
+	struct tb_ring *ring = data;
+
+	spin_lock(&ring->nhi->lock);
+	spin_lock(&ring->lock);
+	__ring_interrupt(ring);
+	spin_unlock(&ring->lock);
+	spin_unlock(&ring->nhi->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+	struct tb_nhi *nhi = ring->nhi;
+	unsigned long irqflags;
+	int ret;
+
+	if (!nhi->pdev->msix_enabled)
+		return 0;
+
+	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	ring->vector = ret;
+
+	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+	if (ring->irq < 0)
+		return ring->irq;
+
+	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+	if (ring->irq <= 0)
+		return;
+
+	free_irq(ring->irq, ring);
+	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+	ring->vector = 0;
+	ring->irq = 0;
+}
+
+static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
+{
+	int ret = 0;
+
+	spin_lock_irq(&nhi->lock);
+
+	if (ring->hop < 0) {
+		unsigned int i;
+
+		/*
+		 * Automatically allocate HopID from the non-reserved
+		 * range 8 .. hop_count - 1.
+		 */
+		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+			if (ring->is_tx) {
+				if (!nhi->tx_rings[i]) {
+					ring->hop = i;
+					break;
+				}
+			} else {
+				if (!nhi->rx_rings[i]) {
+					ring->hop = i;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
+		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
+		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
+			 ring->hop);
+		ret = -EBUSY;
+		goto err_unlock;
+	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
+			 ring->hop);
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+
+	if (ring->is_tx)
+		nhi->tx_rings[ring->hop] = ring;
+	else
+		nhi->rx_rings[ring->hop] = ring;
+
+err_unlock:
+	spin_unlock_irq(&nhi->lock);
+
+	return ret;
+}
+
+static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+				     bool transmit, unsigned int flags,
+				     u16 sof_mask, u16 eof_mask,
+				     void (*start_poll)(void *),
+				     void *poll_data)
+{
+	struct tb_ring *ring = NULL;
+	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+		 transmit ? "TX" : "RX", hop, size);
+
+	/* Tx Ring 2 is reserved for E2E workaround */
+	if (transmit && hop == RING_E2E_UNUSED_HOPID)
+		return NULL;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	spin_lock_init(&ring->lock);
+	INIT_LIST_HEAD(&ring->queue);
+	INIT_LIST_HEAD(&ring->in_flight);
+	INIT_WORK(&ring->work, ring_work);
+
+	ring->nhi = nhi;
+	ring->hop = hop;
+	ring->is_tx = transmit;
+	ring->size = size;
+	ring->flags = flags;
+	ring->sof_mask = sof_mask;
+	ring->eof_mask = eof_mask;
+	ring->head = 0;
+	ring->tail = 0;
+	ring->running = false;
+	ring->start_poll = start_poll;
+	ring->poll_data = poll_data;
+
+	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
+			size * sizeof(*ring->descriptors),
+			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
+	if (!ring->descriptors)
+		goto err_free_ring;
+
+	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+		goto err_free_descs;
+
+	if (nhi_alloc_hop(nhi, ring))
+		goto err_release_msix;
+
+	return ring;
+
+err_release_msix:
+	ring_release_msix(ring);
+err_free_descs:
+	dma_free_coherent(&ring->nhi->pdev->dev,
+			  ring->size * sizeof(*ring->descriptors),
+			  ring->descriptors, ring->descriptors_dma);
+err_free_ring:
+	kfree(ring);
+
+	return NULL;
+}
+
+/**
+ * tb_ring_alloc_tx() - Allocate DMA ring for transmit
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ */
+struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+				 unsigned int flags)
+{
+	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
+
+/**
+ * tb_ring_alloc_rx() - Allocate DMA ring for receive
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ * @sof_mask: Mask of PDF values that start a frame
+ * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ *		interrupt is triggered and masked, instead of callback
+ *		in each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
+ */
+struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+				 unsigned int flags, u16 sof_mask, u16 eof_mask,
+				 void (*start_poll)(void *), void *poll_data)
+{
+	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+			     start_poll, poll_data);
+}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
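+
+/*
+ * Rough usage sketch of the exported ring API for a client driver (not
+ * taken from an in-tree user; the ring size, masks and callback below
+ * are made-up placeholders):
+ *
+ *	ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME,
+ *				sof_mask, eof_mask, NULL, NULL);
+ *	tb_ring_start(ring);
+ *
+ *	frame->buffer_phy = dma_map_single(...);
+ *	frame->callback = my_rx_callback;
+ *	__tb_ring_enqueue(ring, frame);
+ *
+ *	tb_ring_stop(ring);
+ *	tb_ring_free(ring);
+ */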
+
+/**
+ * tb_ring_start() - enable a ring
+ *
+ * Must not be invoked in parallel with tb_ring_stop().
+ */
+void tb_ring_start(struct tb_ring *ring)
+{
+	u16 frame_size;
+	u32 flags;
+
+	spin_lock_irq(&ring->nhi->lock);
+	spin_lock(&ring->lock);
+	if (ring->nhi->going_away)
+		goto err;
+	if (ring->running) {
+		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
+		goto err;
+	}
+	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
+		 RING_TYPE(ring), ring->hop);
+
+	if (ring->flags & RING_FLAG_FRAME) {
+		/* Means 4096 */
+		frame_size = 0;
+		flags = RING_FLAG_ENABLE;
+	} else {
+		frame_size = TB_FRAME_SIZE;
+		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
+	}
+
+	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+		u32 hop;
+
+		/*
+		 * In order not to lose Rx packets we enable the end-to-end
+		 * workaround, which transfers Rx credits to an unused Tx
+		 * HopID.
+		 */
+		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
+		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
+		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
+	}
+
+	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
+	if (ring->is_tx) {
+		ring_iowrite32desc(ring, ring->size, 12);
+		ring_iowrite32options(ring, 0, 4); /* time related ? */
+		ring_iowrite32options(ring, flags, 0);
+	} else {
+		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
+
+		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
+		ring_iowrite32options(ring, sof_eof_mask, 4);
+		ring_iowrite32options(ring, flags, 0);
+	}
+	ring_interrupt_active(ring, true);
+	ring->running = true;
+err:
+	spin_unlock(&ring->lock);
+	spin_unlock_irq(&ring->nhi->lock);
+}
+EXPORT_SYMBOL_GPL(tb_ring_start);
+
+/**
+ * tb_ring_stop() - shutdown a ring
+ *
+ * Must not be invoked from a callback.
+ *
+ * This method will disable the ring. Further calls to
+ * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring has been
+ * started again with tb_ring_start().
+ *
+ * All enqueued frames will be canceled and their callbacks will be executed
+ * with frame->canceled set to true (on the callback thread). This method
+ * returns only after all callback invocations have finished.
+ */
+void tb_ring_stop(struct tb_ring *ring)
+{
+	spin_lock_irq(&ring->nhi->lock);
+	spin_lock(&ring->lock);
+	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
+		 RING_TYPE(ring), ring->hop);
+	if (ring->nhi->going_away)
+		goto err;
+	if (!ring->running) {
+		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
+			 RING_TYPE(ring), ring->hop);
+		goto err;
+	}
+	ring_interrupt_active(ring, false);
+
+	ring_iowrite32options(ring, 0, 0);
+	ring_iowrite64desc(ring, 0, 0);
+	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+	ring_iowrite32desc(ring, 0, 12);
+	ring->head = 0;
+	ring->tail = 0;
+	ring->running = false;
+
+err:
+	spin_unlock(&ring->lock);
+	spin_unlock_irq(&ring->nhi->lock);
+
+	/*
+	 * schedule ring->work to invoke callbacks on all remaining frames.
+	 */
+	schedule_work(&ring->work);
+	flush_work(&ring->work);
+}
+EXPORT_SYMBOL_GPL(tb_ring_stop);
+
+/*
+ * tb_ring_free() - free ring
+ *
+ * When this method returns all invocations of ring->callback will have
+ * finished.
+ *
+ * Ring must be stopped.
+ *
+ * Must NOT be called from ring_frame->callback!
+ */
+void tb_ring_free(struct tb_ring *ring)
+{
+	spin_lock_irq(&ring->nhi->lock);
+	/*
+	 * Dissociate the ring from the NHI. This also ensures that
+	 * nhi_interrupt_work cannot reschedule ring->work.
+	 */
+	if (ring->is_tx)
+		ring->nhi->tx_rings[ring->hop] = NULL;
+	else
+		ring->nhi->rx_rings[ring->hop] = NULL;
+
+	if (ring->running) {
+		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
+			 RING_TYPE(ring), ring->hop);
+	}
+	spin_unlock_irq(&ring->nhi->lock);
+
+	ring_release_msix(ring);
+
+	dma_free_coherent(&ring->nhi->pdev->dev,
+			  ring->size * sizeof(*ring->descriptors),
+			  ring->descriptors, ring->descriptors_dma);
+
+	ring->descriptors = NULL;
+	ring->descriptors_dma = 0;
+
+	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n",
+		 RING_TYPE(ring), ring->hop);
+
+	/*
+	 * ring->work can no longer be scheduled (it is scheduled only
+	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
+	 * to finish before freeing the ring.
+	 */
+	flush_work(&ring->work);
+	kfree(ring);
+}
+EXPORT_SYMBOL_GPL(tb_ring_free);
+
+/**
+ * nhi_mailbox_cmd() - Send a command through NHI mailbox
+ * @nhi: Pointer to the NHI structure
+ * @cmd: Command to send
+ * @data: Data to be send with the command
+ *
+ * Sends mailbox command to the firmware running on NHI. Returns %0 in
+ * case of success and negative errno in case of failure.
+ */
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
+{
+	ktime_t timeout;
+	u32 val;
+
+	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
+
+	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
+	val |= REG_INMAIL_OP_REQUEST | cmd;
+	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
+
+	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
+	do {
+		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+		if (!(val & REG_INMAIL_OP_REQUEST))
+			break;
+		usleep_range(10, 20);
+	} while (ktime_before(ktime_get(), timeout));
+
+	if (val & REG_INMAIL_OP_REQUEST)
+		return -ETIMEDOUT;
+	if (val & REG_INMAIL_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * nhi_mailbox_mode() - Return current firmware operation mode
+ * @nhi: Pointer to the NHI structure
+ *
+ * The function reads current firmware operation mode using NHI mailbox
+ * registers and returns it to the caller.
+ */
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
+{
+	u32 val;
+
+	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
+
+	return (enum nhi_fw_mode)val;
+}
+
+static void nhi_interrupt_work(struct work_struct *work)
+{
+	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
+	int value = 0; /* Suppress uninitialized usage warning. */
+	int bit;
+	int hop = -1;
+	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
+	struct tb_ring *ring;
+
+	spin_lock_irq(&nhi->lock);
+
+	/*
+	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
+	 * (TX, RX, RX overflow). We iterate over the bits and read new
+	 * dwords as required. The registers are cleared on read.
+	 */
+	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
+		if (bit % 32 == 0)
+			value = ioread32(nhi->iobase
+					 + REG_RING_NOTIFY_BASE
+					 + 4 * (bit / 32));
+		if (++hop == nhi->hop_count) {
+			hop = 0;
+			type++;
+		}
+		if ((value & (1 << (bit % 32))) == 0)
+			continue;
+		if (type == 2) {
+			dev_warn(&nhi->pdev->dev,
+				 "RX overflow for ring %d\n",
+				 hop);
+			continue;
+		}
+		if (type == 0)
+			ring = nhi->tx_rings[hop];
+		else
+			ring = nhi->rx_rings[hop];
+		if (ring == NULL) {
+			dev_warn(&nhi->pdev->dev,
+				 "got interrupt for inactive %s ring %d\n",
+				 type ? "RX" : "TX",
+				 hop);
+			continue;
+		}
+
+		spin_lock(&ring->lock);
+		__ring_interrupt(ring);
+		spin_unlock(&ring->lock);
+	}
+	spin_unlock_irq(&nhi->lock);
+}
+
+static irqreturn_t nhi_msi(int irq, void *data)
+{
+	struct tb_nhi *nhi = data;
+	schedule_work(&nhi->interrupt_work);
+	return IRQ_HANDLED;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_suspend_noirq(tb);
+}
+
+static void nhi_enable_int_throttling(struct tb_nhi *nhi)
+{
+	/* Throttling is specified in 256ns increments */
+	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
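+	/* 128000 ns / 256 ns = 500, i.e. throttle to roughly 128 us */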
+	unsigned int i;
+
+	/*
+	 * Configure interrupt throttling for all vectors even if we
+	 * only use few.
+	 */
+	for (i = 0; i < MSIX_MAX_VECS; i++) {
+		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
+		iowrite32(throttle, nhi->iobase + reg);
+	}
+}
+
+static int nhi_resume_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	/*
+	 * Check that the device is still there. It may be that the user
+	 * unplugged the last device, which causes the host controller to
+	 * go away on PCs.
+	 */
+	if (!pci_device_is_present(pdev))
+		tb->nhi->going_away = true;
+	else
+		nhi_enable_int_throttling(tb->nhi);
+
+	return tb_domain_resume_noirq(tb);
+}
+
+static int nhi_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	/*
+	 * If we were runtime suspended when system suspend started,
+	 * schedule runtime resume now. It should bring the domain back
+	 * to functional state.
+	 */
+	if (pm_runtime_suspended(&pdev->dev))
+		pm_runtime_resume(&pdev->dev);
+	else
+		tb_domain_complete(tb);
+}
+
+static int nhi_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_runtime_suspend(tb);
+}
+
+static int nhi_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	nhi_enable_int_throttling(tb->nhi);
+	return tb_domain_runtime_resume(tb);
+}
+
+static void nhi_shutdown(struct tb_nhi *nhi)
+{
+	int i;
+	dev_info(&nhi->pdev->dev, "shutdown\n");
+
+	for (i = 0; i < nhi->hop_count; i++) {
+		if (nhi->tx_rings[i])
+			dev_WARN(&nhi->pdev->dev,
+				 "TX ring %d is still active\n", i);
+		if (nhi->rx_rings[i])
+			dev_WARN(&nhi->pdev->dev,
+				 "RX ring %d is still active\n", i);
+	}
+	nhi_disable_interrupts(nhi);
+	/*
+	 * We have to release the irq before calling flush_work. Otherwise an
+	 * already executing IRQ handler could call schedule_work again.
+	 */
+	if (!nhi->pdev->msix_enabled) {
+		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+		flush_work(&nhi->interrupt_work);
+	}
+	ida_destroy(&nhi->msix_ida);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+	struct pci_dev *pdev = nhi->pdev;
+	int res, irq, nvec;
+
+	/* In case someone left them on. */
+	nhi_disable_interrupts(nhi);
+
+	nhi_enable_int_throttling(nhi);
+
+	ida_init(&nhi->msix_ida);
+
+	/*
+	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+	 * get all MSI-X vectors and if we succeed, each ring will have
+	 * its own MSI-X vector. If for some reason that does not work
+	 * out, we fall back to a single MSI.
+	 */
+	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+				     PCI_IRQ_MSIX);
+	if (nvec < 0) {
+		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+		if (nvec < 0)
+			return nvec;
+
+		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+		irq = pci_irq_vector(nhi->pdev, 0);
+		if (irq < 0)
+			return irq;
+
+		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
+		if (res) {
+			dev_err(&pdev->dev, "request_irq failed, aborting\n");
+			return res;
+		}
+	}
+
+	return 0;
+}
+
+static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct tb_nhi *nhi;
+	struct tb *tb;
+	int res;
+
+	res = pcim_enable_device(pdev);
+	if (res) {
+		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
+		return res;
+	}
+
+	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
+	if (res) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+		return res;
+	}
+
+	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
+	if (!nhi)
+		return -ENOMEM;
+
+	nhi->pdev = pdev;
+	/* cannot fail - table is allocated by pcim_iomap_regions */
+	nhi->iobase = pcim_iomap_table(pdev)[0];
+	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+	if (nhi->hop_count != 12 && nhi->hop_count != 32)
+		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
+			 nhi->hop_count);
+
+	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+				     sizeof(*nhi->tx_rings), GFP_KERNEL);
+	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+				     sizeof(*nhi->rx_rings), GFP_KERNEL);
+	if (!nhi->tx_rings || !nhi->rx_rings)
+		return -ENOMEM;
+
+	res = nhi_init_msi(nhi);
+	if (res) {
+		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
+		return res;
+	}
+
+	spin_lock_init(&nhi->lock);
+
+	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (res)
+		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (res) {
+		dev_err(&pdev->dev, "failed to set DMA mask\n");
+		return res;
+	}
+
+	pci_set_master(pdev);
+
+	tb = icm_probe(nhi);
+	if (!tb)
+		tb = tb_probe(nhi);
+	if (!tb) {
+		dev_err(&nhi->pdev->dev,
+			"failed to determine connection manager, aborting\n");
+		return -ENODEV;
+	}
+
+	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+
+	res = tb_domain_add(tb);
+	if (res) {
+		/*
+		 * At this point the RX/TX rings might already have been
+		 * activated. Do a proper shutdown.
+		 */
+		tb_domain_put(tb);
+		nhi_shutdown(nhi);
+		return res;
+	}
+	pci_set_drvdata(pdev, tb);
+
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
+	return 0;
+}
+
+static void nhi_remove(struct pci_dev *pdev)
+{
+	struct tb *tb = pci_get_drvdata(pdev);
+	struct tb_nhi *nhi = tb->nhi;
+
+	pm_runtime_get_sync(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+
+	tb_domain_remove(tb);
+	nhi_shutdown(nhi);
+}
+
+/*
+ * The tunneled PCI bridges are siblings of ours. Use resume_noirq to
+ * re-enable the tunnels as early as possible. A corresponding PCI quirk
+ * blocks the downstream bridges' resume_noirq until we are done.
+ */
+static const struct dev_pm_ops nhi_pm_ops = {
+	.suspend_noirq = nhi_suspend_noirq,
+	.resume_noirq = nhi_resume_noirq,
+	.freeze_noirq = nhi_suspend_noirq, /*
+					    * we just disable hotplug, the
+					    * pci-tunnels stay alive.
+					    */
+	.thaw_noirq = nhi_resume_noirq,
+	.restore_noirq = nhi_resume_noirq,
+	.suspend = nhi_suspend,
+	.freeze = nhi_suspend,
+	.poweroff = nhi_suspend,
+	.complete = nhi_complete,
+	.runtime_suspend = nhi_runtime_suspend,
+	.runtime_resume = nhi_runtime_resume,
+};
+
+static struct pci_device_id nhi_ids[] = {
+	/*
+	 * We have to specify class because the TB bridges use the same
+	 * device and vendor (sub)id on gen 1 and gen 2 controllers.
+	 */
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+		.subvendor = 0x2222, .subdevice = 0x1111,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+		.subvendor = 0x2222, .subdevice = 0x1111,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+	},
+
+	/* Thunderbolt 3 */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+
+	{ 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_LICENSE("GPL");
+
+static struct pci_driver nhi_driver = {
+	.name = "thunderbolt",
+	.id_table = nhi_ids,
+	.probe = nhi_probe,
+	.remove = nhi_remove,
+	.driver.pm = &nhi_pm_ops,
+};
+
+static int __init nhi_init(void)
+{
+	int ret;
+
+	ret = tb_domain_init();
+	if (ret)
+		return ret;
+	ret = pci_register_driver(&nhi_driver);
+	if (ret)
+		tb_domain_exit();
+	return ret;
+}
+
+static void __exit nhi_unload(void)
+{
+	pci_unregister_driver(&nhi_driver);
+	tb_domain_exit();
+}
+
+rootfs_initcall(nhi_init);
+module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
new file mode 100644
index 0000000..1696a45
--- /dev/null
+++ b/drivers/thunderbolt/nhi.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt Cactus Ridge driver - NHI driver
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef DSL3510_H_
+#define DSL3510_H_
+
+#include <linux/thunderbolt.h>
+
+enum nhi_fw_mode {
+	NHI_FW_SAFE_MODE,
+	NHI_FW_AUTH_MODE,
+	NHI_FW_EP_MODE,
+	NHI_FW_CM_MODE,
+};
+
+enum nhi_mailbox_cmd {
+	NHI_MAILBOX_SAVE_DEVS = 0x05,
+	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
+	NHI_MAILBOX_DRV_UNLOADS = 0x07,
+	NHI_MAILBOX_DISCONNECT_PA = 0x10,
+	NHI_MAILBOX_DISCONNECT_PB = 0x11,
+	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
+};
+
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+
+/*
+ * PCI IDs used in this driver from Win Ridge forward. There is no
+ * need for the PCI quirk anymore as we will use ICM also on Apple
+ * hardware.
+ */
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI            0x157d
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE         0x157e
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI		0x15bf
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE	0x15c0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI	0x15d2
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE	0x15d3
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI	0x15d9
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE	0x15da
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI	0x15dc
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI	0x15dd
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI	0x15de
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE	0x15e7
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI		0x15e8
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE	0x15ea
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI		0x15eb
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
+
+#endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
new file mode 100644
index 0000000..b3e49d1
--- /dev/null
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - NHI registers
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef NHI_REGS_H_
+#define NHI_REGS_H_
+
+#include <linux/types.h>
+
+enum ring_flags {
+	RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
+	RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
+	RING_FLAG_PCI_NO_SNOOP = 1 << 29,
+	RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
+	RING_FLAG_ENABLE = 1 << 31,
+};
+
+/**
+ * struct ring_desc - TX/RX ring entry
+ *
+ * For TX set length/eof/sof.
+ * For RX length/eof/sof are set by the NHI.
+ */
+struct ring_desc {
+	u64 phys;
+	u32 length:12;
+	u32 eof:4;
+	u32 sof:4;
+	enum ring_desc_flags flags:12;
+	u32 time; /* write zero */
+} __packed;
+
+/* NHI registers in bar 0 */
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring tail (set by NHI)
+ * 10: ring head (index of first non posted descriptor)
+ * 12: descriptor count
+ */
+#define REG_TX_RING_BASE	0x00000
+
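+/*
+ * Illustrative sketch (assuming "nhi" and "ring" pointers as used in
+ * nhi.c): the TX ring registers described above would be programmed
+ * roughly like this, with "io" pointing at the 16-byte block that
+ * belongs to the ring's hop.
+ *
+ *	void __iomem *io = nhi->iobase + REG_TX_RING_BASE + ring->hop * 16;
+ *
+ *	iowrite32(lower_32_bits(ring->descriptors_dma), io);
+ *	iowrite32(upper_32_bits(ring->descriptors_dma), io + 4);
+ *	iowrite32(ring->size, io + 12);
+ */
+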
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring head (index of first not posted descriptor)
+ * 10: ring tail (set by NHI)
+ * 12: descriptor count
+ * 14: max frame sizes (anything larger than 0x100 has no effect)
+ */
+#define REG_RX_RING_BASE	0x08000
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum_ring_flags
+ * 04: isoch time stamp ?? (write 0)
+ * ..: unknown
+ */
+#define REG_TX_OPTIONS_BASE	0x19800
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum ring_flags
+ *     If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
+ *     the corresponding TX hop id.
+ * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
+ * ..: unknown
+ */
+#define REG_RX_OPTIONS_BASE	0x29800
+#define REG_RX_OPTIONS_E2E_HOP_MASK	GENMASK(22, 12)
+#define REG_RX_OPTIONS_E2E_HOP_SHIFT	12
+
+/*
+ * three bitfields: tx, rx, rx overflow
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+ * cleared on read. New interrupts are fired only after ALL registers have been
+ * read (even those containing only disabled rings).
+ */
+#define REG_RING_NOTIFY_BASE	0x37800
+#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
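+/*
+ * For example, with hop_count == 12 the three bitfields need 3 * 12 = 36
+ * bits, so RING_NOTIFY_REG_COUNT() evaluates to (31 + 36) / 32 = 2
+ * registers, all of which have to be read before new interrupts fire.
+ */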
+
+/*
+ * two bitfields: rx, tx
+ * Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
+ * enable/disable interrupts set/clear the corresponding bits.
+ */
+#define REG_RING_INTERRUPT_BASE	0x38200
+#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+
+#define REG_INT_THROTTLING_RATE	0x38c00
+
+/* Interrupt Vector Allocation */
+#define REG_INT_VEC_ALLOC_BASE	0x38c40
+#define REG_INT_VEC_ALLOC_BITS	4
+#define REG_INT_VEC_ALLOC_MASK	GENMASK(3, 0)
+#define REG_INT_VEC_ALLOC_REGS	(32 / REG_INT_VEC_ALLOC_BITS)
+
+/* The last 11 bits contain the number of hops supported by the NHI port. */
+#define REG_HOP_COUNT		0x39640
+
+#define REG_DMA_MISC			0x39864
+#define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
+
+#define REG_INMAIL_DATA			0x39900
+
+#define REG_INMAIL_CMD			0x39904
+#define REG_INMAIL_CMD_MASK		GENMASK(7, 0)
+#define REG_INMAIL_ERROR		BIT(30)
+#define REG_INMAIL_OP_REQUEST		BIT(31)
+
+#define REG_OUTMAIL_CMD			0x3990c
+#define REG_OUTMAIL_CMD_OPMODE_SHIFT	8
+#define REG_OUTMAIL_CMD_OPMODE_MASK	GENMASK(11, 8)
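+/*
+ * Illustrative sketch: the current firmware operation mode is extracted
+ * from REG_OUTMAIL_CMD roughly like this (see nhi_mailbox_mode()).
+ *
+ *	u32 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+ *
+ *	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+ *	mode = (enum nhi_fw_mode)(val >> REG_OUTMAIL_CMD_OPMODE_SHIFT);
+ */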
+
+#define REG_FW_STS			0x39944
+#define REG_FW_STS_NVM_AUTH_DONE	BIT(31)
+#define REG_FW_STS_CIO_RESET_REQ	BIT(30)
+#define REG_FW_STS_ICM_EN_CPU		BIT(2)
+#define REG_FW_STS_ICM_EN_INVERT	BIT(1)
+#define REG_FW_STS_ICM_EN		BIT(0)
+
+#endif
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
new file mode 100644
index 0000000..ff49ad8
--- /dev/null
+++ b/drivers/thunderbolt/path.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - path/tunnel functionality
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include "tb.h"
+
+
+static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
+{
+	tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
+		     hop->out_port, hop->next_hop,
+		     hop->enable ? "enabled" : "disabled");
+	tb_port_info(port, "  Weight: %d Priority: %d Credits: %d Drop: %d\n",
+		     hop->weight, hop->priority,
+		     hop->initial_credits, hop->drop_packages);
+	tb_port_info(port, "   Counter enabled: %d Counter index: %d\n",
+		     hop->counter_enable, hop->counter);
+	tb_port_info(port, "  Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+		     hop->ingress_fc, hop->egress_fc,
+		     hop->ingress_shared_buffer, hop->egress_shared_buffer);
+	tb_port_info(port, "  Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+		     hop->unknown1, hop->unknown2, hop->unknown3);
+}
+
+/**
+ * tb_path_alloc() - allocate a thunderbolt path
+ *
+ * Return: Returns a tb_path on success or NULL on failure.
+ */
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
+{
+	struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
+	if (!path)
+		return NULL;
+	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+	if (!path->hops) {
+		kfree(path);
+		return NULL;
+	}
+	path->tb = tb;
+	path->path_length = num_hops;
+	return path;
+}
+
+/**
+ * tb_path_free() - free a deactivated path
+ */
+void tb_path_free(struct tb_path *path)
+{
+	if (path->activated) {
+		tb_WARN(path->tb, "trying to free an activated path\n");
+		return;
+	}
+	kfree(path->hops);
+	kfree(path);
+}
+
+static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
+{
+	int i, res;
+	for (i = first_hop; i < path->path_length; i++) {
+		res = tb_port_add_nfc_credits(path->hops[i].in_port,
+					      -path->nfc_credits);
+		if (res)
+			tb_port_warn(path->hops[i].in_port,
+				     "nfc credits deallocation failed for hop %d\n",
+				     i);
+	}
+}
+
+static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
+{
+	int i, res;
+	struct tb_regs_hop hop = { };
+	for (i = first_hop; i < path->path_length; i++) {
+		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+				    2 * path->hops[i].in_hop_index, 2);
+		if (res)
+			tb_port_warn(path->hops[i].in_port,
+				     "hop deactivation failed for hop %d, index %d\n",
+				     i, path->hops[i].in_hop_index);
+	}
+}
+
+void tb_path_deactivate(struct tb_path *path)
+{
+	if (!path->activated) {
+		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
+		return;
+	}
+	tb_info(path->tb,
+		"deactivating path from %llx:%x to %llx:%x\n",
+		tb_route(path->hops[0].in_port->sw),
+		path->hops[0].in_port->port,
+		tb_route(path->hops[path->path_length - 1].out_port->sw),
+		path->hops[path->path_length - 1].out_port->port);
+	__tb_path_deactivate_hops(path, 0);
+	__tb_path_deallocate_nfc(path, 0);
+	path->activated = false;
+}
+
+/**
+ * tb_path_activate() - activate a path
+ *
+ * Activate a path starting with the last hop and iterating backwards. The
+ * caller must fill path->hops before calling tb_path_activate().
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_path_activate(struct tb_path *path)
+{
+	int i, res;
+	enum tb_path_port out_mask, in_mask;
+	if (path->activated) {
+		tb_WARN(path->tb, "trying to activate already activated path\n");
+		return -EINVAL;
+	}
+
+	tb_info(path->tb,
+		"activating path from %llx:%x to %llx:%x\n",
+		tb_route(path->hops[0].in_port->sw),
+		path->hops[0].in_port->port,
+		tb_route(path->hops[path->path_length - 1].out_port->sw),
+		path->hops[path->path_length - 1].out_port->port);
+
+	/* Clear counters. */
+	for (i = path->path_length - 1; i >= 0; i--) {
+		if (path->hops[i].in_counter_index == -1)
+			continue;
+		res = tb_port_clear_counter(path->hops[i].in_port,
+					    path->hops[i].in_counter_index);
+		if (res)
+			goto err;
+	}
+
+	/* Add non flow controlled credits. */
+	for (i = path->path_length - 1; i >= 0; i--) {
+		res = tb_port_add_nfc_credits(path->hops[i].in_port,
+					      path->nfc_credits);
+		if (res) {
+			__tb_path_deallocate_nfc(path, i);
+			goto err;
+		}
+	}
+
+	/* Activate hops. */
+	for (i = path->path_length - 1; i >= 0; i--) {
+		struct tb_regs_hop hop = { 0 };
+
+		/*
+		 * We do (currently) not tear down paths set up by the firmware.
+		 * If a firmware device is unplugged and plugged in again then
+		 * it can happen that we reuse some of the hops from the (now
+		 * defunct) firmware path. This causes the hotplug operation to
+		 * fail (the pci device does not show up). Clearing the hop
+		 * before overwriting it fixes the problem.
+		 *
+		 * Should be removed once we discover and tear down firmware
+		 * paths.
+		 */
+		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+				    2 * path->hops[i].in_hop_index, 2);
+		if (res) {
+			__tb_path_deactivate_hops(path, i);
+			__tb_path_deallocate_nfc(path, 0);
+			goto err;
+		}
+
+		/* dword 0 */
+		hop.next_hop = path->hops[i].next_hop_index;
+		hop.out_port = path->hops[i].out_port->port;
+		/* TODO: figure out why these are good values */
+		hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
+		hop.unknown1 = 0;
+		hop.enable = 1;
+
+		/* dword 1 */
+		out_mask = (i == path->path_length - 1) ?
+				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
+		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
+		hop.weight = path->weight;
+		hop.unknown2 = 0;
+		hop.priority = path->priority;
+		hop.drop_packages = path->drop_packages;
+		hop.counter = path->hops[i].in_counter_index;
+		hop.counter_enable = path->hops[i].in_counter_index != -1;
+		hop.ingress_fc = path->ingress_fc_enable & in_mask;
+		hop.egress_fc = path->egress_fc_enable & out_mask;
+		hop.ingress_shared_buffer = path->ingress_shared_buffer
+					    & in_mask;
+		hop.egress_shared_buffer = path->egress_shared_buffer
+					    & out_mask;
+		hop.unknown3 = 0;
+
+		tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d\n",
+			     i, path->hops[i].in_hop_index);
+		tb_dump_hop(path->hops[i].in_port, &hop);
+		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+				    2 * path->hops[i].in_hop_index, 2);
+		if (res) {
+			__tb_path_deactivate_hops(path, i);
+			__tb_path_deallocate_nfc(path, 0);
+			goto err;
+		}
+	}
+	path->activated = true;
+	tb_info(path->tb, "path activation complete\n");
+	return 0;
+err:
+	tb_WARN(path->tb, "path activation failed\n");
+	return res;
+}
+
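+/*
+ * Illustrative sketch (not a function in this file): a tunnel
+ * implementation would typically drive the path API like this. The
+ * hop indices and settings below are hypothetical and most fields
+ * are elided.
+ *
+ *	struct tb_path *path = tb_path_alloc(tb, 2);
+ *
+ *	if (!path)
+ *		return -ENOMEM;
+ *	path->priority = 3;
+ *	path->weight = 1;
+ *	path->hops[0].in_port = down;
+ *	path->hops[0].in_hop_index = 8;
+ *	path->hops[0].in_counter_index = -1;
+ *	...
+ *	if (tb_path_activate(path)) {
+ *		tb_path_free(path);
+ *		return -EIO;
+ *	}
+ *
+ * On teardown the path is deactivated with tb_path_deactivate() and
+ * then released with tb_path_free().
+ */
+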
+/**
+ * tb_path_is_invalid() - check whether any ports on the path are invalid
+ *
+ * Return: Returns true if the path is invalid, false otherwise.
+ */
+bool tb_path_is_invalid(struct tb_path *path)
+{
+	int i = 0;
+	for (i = 0; i < path->path_length; i++) {
+		if (path->hops[i].in_port->sw->is_unplugged)
+			return true;
+		if (path->hops[i].out_port->sw->is_unplugged)
+			return true;
+	}
+	return false;
+}
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
new file mode 100644
index 0000000..8fe913a
--- /dev/null
+++ b/drivers/thunderbolt/property.c
@@ -0,0 +1,670 @@
+/*
+ * Thunderbolt XDomain property support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+#include <linux/thunderbolt.h>
+
+struct tb_property_entry {
+	u32 key_hi;
+	u32 key_lo;
+	u16 length;
+	u8 reserved;
+	u8 type;
+	u32 value;
+};
+
+struct tb_property_rootdir_entry {
+	u32 magic;
+	u32 length;
+	struct tb_property_entry entries[];
+};
+
+struct tb_property_dir_entry {
+	u32 uuid[4];
+	struct tb_property_entry entries[];
+};
+
+#define TB_PROPERTY_ROOTDIR_MAGIC	0x55584401
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+	size_t block_len, unsigned int dir_offset, size_t dir_len,
+	bool is_root);
+
+static inline void parse_dwdata(void *dst, const void *src, size_t dwords)
+{
+	be32_to_cpu_array(dst, src, dwords);
+}
+
+static inline void format_dwdata(void *dst, const void *src, size_t dwords)
+{
+	cpu_to_be32_array(dst, src, dwords);
+}
+
+static bool tb_property_entry_valid(const struct tb_property_entry *entry,
+				  size_t block_len)
+{
+	switch (entry->type) {
+	case TB_PROPERTY_TYPE_DIRECTORY:
+	case TB_PROPERTY_TYPE_DATA:
+	case TB_PROPERTY_TYPE_TEXT:
+		if (entry->length > block_len)
+			return false;
+		if (entry->value + entry->length > block_len)
+			return false;
+		break;
+
+	case TB_PROPERTY_TYPE_VALUE:
+		if (entry->length != 1)
+			return false;
+		break;
+	}
+
+	return true;
+}
+
+static bool tb_property_key_valid(const char *key)
+{
+	return key && strlen(key) <= TB_PROPERTY_KEY_SIZE;
+}
+
+static struct tb_property *
+tb_property_alloc(const char *key, enum tb_property_type type)
+{
+	struct tb_property *property;
+
+	property = kzalloc(sizeof(*property), GFP_KERNEL);
+	if (!property)
+		return NULL;
+
+	strcpy(property->key, key);
+	property->type = type;
+	INIT_LIST_HEAD(&property->list);
+
+	return property;
+}
+
+static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
+					const struct tb_property_entry *entry)
+{
+	char key[TB_PROPERTY_KEY_SIZE + 1];
+	struct tb_property *property;
+	struct tb_property_dir *dir;
+
+	if (!tb_property_entry_valid(entry, block_len))
+		return NULL;
+
+	parse_dwdata(key, entry, 2);
+	key[TB_PROPERTY_KEY_SIZE] = '\0';
+
+	property = tb_property_alloc(key, entry->type);
+	if (!property)
+		return NULL;
+
+	property->length = entry->length;
+
+	switch (property->type) {
+	case TB_PROPERTY_TYPE_DIRECTORY:
+		dir = __tb_property_parse_dir(block, block_len, entry->value,
+					      entry->length, false);
+		if (!dir) {
+			kfree(property);
+			return NULL;
+		}
+		property->value.dir = dir;
+		break;
+
+	case TB_PROPERTY_TYPE_DATA:
+		property->value.data = kcalloc(property->length, sizeof(u32),
+					       GFP_KERNEL);
+		if (!property->value.data) {
+			kfree(property);
+			return NULL;
+		}
+		parse_dwdata(property->value.data, block + entry->value,
+			     entry->length);
+		break;
+
+	case TB_PROPERTY_TYPE_TEXT:
+		property->value.text = kcalloc(property->length, sizeof(u32),
+					       GFP_KERNEL);
+		if (!property->value.text) {
+			kfree(property);
+			return NULL;
+		}
+		parse_dwdata(property->value.text, block + entry->value,
+			     entry->length);
+		/* Force null termination */
+		property->value.text[property->length * 4 - 1] = '\0';
+		break;
+
+	case TB_PROPERTY_TYPE_VALUE:
+		property->value.immediate = entry->value;
+		break;
+
+	default:
+		property->type = TB_PROPERTY_TYPE_UNKNOWN;
+		break;
+	}
+
+	return property;
+}
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+	size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root)
+{
+	const struct tb_property_entry *entries;
+	size_t i, content_len, nentries;
+	unsigned int content_offset;
+	struct tb_property_dir *dir;
+
+	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+	if (!dir)
+		return NULL;
+
+	if (is_root) {
+		content_offset = dir_offset + 2;
+		content_len = dir_len;
+	} else {
+		dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
+				    GFP_KERNEL);
+		if (!dir->uuid) {
+			kfree(dir);
+			return NULL;
+		}
+		content_offset = dir_offset + 4;
+		content_len = dir_len - 4; /* Length includes UUID */
+	}
+
+	entries = (const struct tb_property_entry *)&block[content_offset];
+	nentries = content_len / (sizeof(*entries) / 4);
+
+	INIT_LIST_HEAD(&dir->properties);
+
+	for (i = 0; i < nentries; i++) {
+		struct tb_property *property;
+
+		property = tb_property_parse(block, block_len, &entries[i]);
+		if (!property) {
+			tb_property_free_dir(dir);
+			return NULL;
+		}
+
+		list_add_tail(&property->list, &dir->properties);
+	}
+
+	return dir;
+}
+
+/**
+ * tb_property_parse_dir() - Parses properties from given property block
+ * @block: Property block to parse
+ * @block_len: Number of dword elements in the property block
+ *
+ * This function parses the XDomain properties data block into a format that
+ * can be traversed using the helper functions provided by this module.
+ * Upon success returns the parsed directory. In case of error returns
+ * %NULL. The resulting &struct tb_property_dir needs to be released by
+ * calling tb_property_free_dir() when not needed anymore.
+ *
+ * The @block is expected to be a root directory.
+ */
+struct tb_property_dir *tb_property_parse_dir(const u32 *block,
+					      size_t block_len)
+{
+	const struct tb_property_rootdir_entry *rootdir =
+		(const struct tb_property_rootdir_entry *)block;
+
+	if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC)
+		return NULL;
+	if (rootdir->length > block_len)
+		return NULL;
+
+	return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
+				       true);
+}
+
+/**
+ * tb_property_create_dir() - Creates new property directory
+ * @uuid: UUID used to identify the particular directory
+ *
+ * Creates new, empty property directory. If @uuid is %NULL then the
+ * directory is assumed to be root directory.
+ */
+struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
+{
+	struct tb_property_dir *dir;
+
+	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+	if (!dir)
+		return NULL;
+
+	INIT_LIST_HEAD(&dir->properties);
+	if (uuid) {
+		dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL);
+		if (!dir->uuid) {
+			kfree(dir);
+			return NULL;
+		}
+	}
+
+	return dir;
+}
+EXPORT_SYMBOL_GPL(tb_property_create_dir);
+
+static void tb_property_free(struct tb_property *property)
+{
+	switch (property->type) {
+	case TB_PROPERTY_TYPE_DIRECTORY:
+		tb_property_free_dir(property->value.dir);
+		break;
+
+	case TB_PROPERTY_TYPE_DATA:
+		kfree(property->value.data);
+		break;
+
+	case TB_PROPERTY_TYPE_TEXT:
+		kfree(property->value.text);
+		break;
+
+	default:
+		break;
+	}
+
+	kfree(property);
+}
+
+/**
+ * tb_property_free_dir() - Release memory allocated for property directory
+ * @dir: Directory to release
+ *
+ * This will release all the memory the directory occupies including all
+ * descendants. It is OK to pass %NULL @dir, then the function does
+ * nothing.
+ */
+void tb_property_free_dir(struct tb_property_dir *dir)
+{
+	struct tb_property *property, *tmp;
+
+	if (!dir)
+		return;
+
+	list_for_each_entry_safe(property, tmp, &dir->properties, list) {
+		list_del(&property->list);
+		tb_property_free(property);
+	}
+	kfree(dir->uuid);
+	kfree(dir);
+}
+EXPORT_SYMBOL_GPL(tb_property_free_dir);
+
+static size_t tb_property_dir_length(const struct tb_property_dir *dir,
+				     bool recurse, size_t *data_len)
+{
+	const struct tb_property *property;
+	size_t len = 0;
+
+	if (dir->uuid)
+		len += sizeof(*dir->uuid) / 4;
+	else
+		len += sizeof(struct tb_property_rootdir_entry) / 4;
+
+	list_for_each_entry(property, &dir->properties, list) {
+		len += sizeof(struct tb_property_entry) / 4;
+
+		switch (property->type) {
+		case TB_PROPERTY_TYPE_DIRECTORY:
+			if (recurse) {
+				len += tb_property_dir_length(
+					property->value.dir, recurse, data_len);
+			}
+			/* Reserve dword padding after each directory */
+			if (data_len)
+				*data_len += 1;
+			break;
+
+		case TB_PROPERTY_TYPE_DATA:
+		case TB_PROPERTY_TYPE_TEXT:
+			if (data_len)
+				*data_len += property->length;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	return len;
+}
+
+static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
+	u32 *block, unsigned int start_offset, size_t block_len)
+{
+	unsigned int data_offset, dir_end;
+	const struct tb_property *property;
+	struct tb_property_entry *entry;
+	size_t dir_len, data_len = 0;
+	int ret;
+
+	/*
+	 * The structure of property block looks like following. Leaf
+	 * data/text is included right after the directory and each
+	 * directory follows each other (even nested ones).
+	 *
+	 * +----------+ <-- start_offset
+	 * |  header  | <-- root directory header
+	 * +----------+ ---
+	 * |  entry 0 | -^--------------------.
+	 * +----------+  |                    |
+	 * |  entry 1 | -|--------------------|--.
+	 * +----------+  |                    |  |
+	 * |  entry 2 | -|-----------------.  |  |
+	 * +----------+  |                 |  |  |
+	 * :          :  |  dir_len        |  |  |
+	 * .          .  |                 |  |  |
+	 * :          :  |                 |  |  |
+	 * +----------+  |                 |  |  |
+	 * |  entry n |  v                 |  |  |
+	 * +----------+ <-- data_offset    |  |  |
+	 * |  data 0  | <------------------|--'  |
+	 * +----------+                    |     |
+	 * |  data 1  | <------------------|-----'
+	 * +----------+                    |
+	 * | 00000000 | padding            |
+	 * +----------+ <-- dir_end <------'
+	 * |   UUID   | <-- directory UUID (child directory)
+	 * +----------+
+	 * |  entry 0 |
+	 * +----------+
+	 * |  entry 1 |
+	 * +----------+
+	 * :          :
+	 * .          .
+	 * :          :
+	 * +----------+
+	 * |  entry n |
+	 * +----------+
+	 * |  data 0  |
+	 * +----------+
+	 *
+	 * We use dir_end to hold pointer to the end of the directory. It
+	 * will increase as we add directories and each directory should be
+	 * added starting from previous dir_end.
+	 */
+	dir_len = tb_property_dir_length(dir, false, &data_len);
+	data_offset = start_offset + dir_len;
+	dir_end = start_offset + data_len + dir_len;
+
+	if (data_offset > dir_end)
+		return -EINVAL;
+	if (dir_end > block_len)
+		return -EINVAL;
+
+	/* Write headers first */
+	if (dir->uuid) {
+		struct tb_property_dir_entry *pe;
+
+		pe = (struct tb_property_dir_entry *)&block[start_offset];
+		memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid));
+		entry = pe->entries;
+	} else {
+		struct tb_property_rootdir_entry *re;
+
+		re = (struct tb_property_rootdir_entry *)&block[start_offset];
+		re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
+		re->length = dir_len - sizeof(*re) / 4;
+		entry = re->entries;
+	}
+
+	list_for_each_entry(property, &dir->properties, list) {
+		const struct tb_property_dir *child;
+
+		format_dwdata(entry, property->key, 2);
+		entry->type = property->type;
+
+		switch (property->type) {
+		case TB_PROPERTY_TYPE_DIRECTORY:
+			child = property->value.dir;
+			ret = __tb_property_format_dir(child, block, dir_end,
+						       block_len);
+			if (ret < 0)
+				return ret;
+			entry->length = tb_property_dir_length(child, false,
+							       NULL);
+			entry->value = dir_end;
+			dir_end = ret;
+			break;
+
+		case TB_PROPERTY_TYPE_DATA:
+			format_dwdata(&block[data_offset], property->value.data,
+				      property->length);
+			entry->length = property->length;
+			entry->value = data_offset;
+			data_offset += entry->length;
+			break;
+
+		case TB_PROPERTY_TYPE_TEXT:
+			format_dwdata(&block[data_offset], property->value.text,
+				      property->length);
+			entry->length = property->length;
+			entry->value = data_offset;
+			data_offset += entry->length;
+			break;
+
+		case TB_PROPERTY_TYPE_VALUE:
+			entry->length = property->length;
+			entry->value = property->value.immediate;
+			break;
+
+		default:
+			break;
+		}
+
+		entry++;
+	}
+
+	return dir_end;
+}
+
+/**
+ * tb_property_format_dir() - Formats directory to the packed XDomain format
+ * @dir: Directory to format
+ * @block: Property block where the packed data is placed
+ * @block_len: Length of the property block
+ *
+ * This function formats the directory to the packed format that can be
+ * then sent over the thunderbolt fabric to the receiving host. Returns %0 in
+ * case of success and negative errno on failure. Passing %NULL in @block
+ * returns number of entries the block takes.
+ */
+ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
+			       size_t block_len)
+{
+	ssize_t ret;
+
+	if (!block) {
+		size_t dir_len, data_len = 0;
+
+		dir_len = tb_property_dir_length(dir, true, &data_len);
+		return dir_len + data_len;
+	}
+
+	ret = __tb_property_format_dir(dir, block, 0, block_len);
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * tb_property_add_immediate() - Add immediate property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @value: Immediate value to store with the property
+ */
+int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
+			      u32 value)
+{
+	struct tb_property *property;
+
+	if (!tb_property_key_valid(key))
+		return -EINVAL;
+
+	property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE);
+	if (!property)
+		return -ENOMEM;
+
+	property->length = 1;
+	property->value.immediate = value;
+
+	list_add_tail(&property->list, &parent->properties);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_immediate);
+
+/**
+ * tb_property_add_data() - Adds arbitrary data property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @buf: Data buffer to add
+ * @buflen: Number of bytes in the data buffer
+ *
+ * Function takes a copy of @buf and adds it to the directory.
+ */
+int tb_property_add_data(struct tb_property_dir *parent, const char *key,
+			 const void *buf, size_t buflen)
+{
+	/* Need to pad to dword boundary */
+	size_t size = round_up(buflen, 4);
+	struct tb_property *property;
+
+	if (!tb_property_key_valid(key))
+		return -EINVAL;
+
+	property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA);
+	if (!property)
+		return -ENOMEM;
+
+	property->length = size / 4;
+	property->value.data = kzalloc(size, GFP_KERNEL);
+	if (!property->value.data) {
+		kfree(property);
+		return -ENOMEM;
+	}
+	memcpy(property->value.data, buf, buflen);
+
+	list_add_tail(&property->list, &parent->properties);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_data);
+
+/**
+ * tb_property_add_text() - Adds string property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @text: String to add
+ *
+ * Function takes a copy of @text and adds it to the directory.
+ */
+int tb_property_add_text(struct tb_property_dir *parent, const char *key,
+			 const char *text)
+{
+	/* Need to pad to dword boundary */
+	size_t size = round_up(strlen(text) + 1, 4);
+	struct tb_property *property;
+
+	if (!tb_property_key_valid(key))
+		return -EINVAL;
+
+	property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT);
+	if (!property)
+		return -ENOMEM;
+
+	property->length = size / 4;
+	property->value.text = kzalloc(size, GFP_KERNEL);
+	if (!property->value.text) {
+		kfree(property);
+		return -ENOMEM;
+	}
+	strcpy(property->value.text, text);
+
+	list_add_tail(&property->list, &parent->properties);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_text);
+
+/**
+ * tb_property_add_dir() - Adds a directory to the parent directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @dir: Directory to add
+ */
+int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
+			struct tb_property_dir *dir)
+{
+	struct tb_property *property;
+
+	if (!tb_property_key_valid(key))
+		return -EINVAL;
+
+	property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY);
+	if (!property)
+		return -ENOMEM;
+
+	property->value.dir = dir;
+
+	list_add_tail(&property->list, &parent->properties);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_dir);
+
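+/*
+ * Illustrative sketch: building a directory and packing it into a
+ * property block could look roughly like this (the keys are examples,
+ * error handling is omitted, and keys may be at most
+ * %TB_PROPERTY_KEY_SIZE characters long).
+ *
+ *	struct tb_property_dir *dir = tb_property_create_dir(NULL);
+ *	ssize_t dwords;
+ *	u32 *block;
+ *
+ *	tb_property_add_immediate(dir, "prtcvers", 1);
+ *	tb_property_add_text(dir, "deviceid", "example");
+ *
+ *	dwords = tb_property_format_dir(dir, NULL, 0);
+ *	block = kcalloc(dwords, sizeof(u32), GFP_KERNEL);
+ *	if (block)
+ *		tb_property_format_dir(dir, block, dwords);
+ */
+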
+/**
+ * tb_property_remove() - Removes property from a parent directory
+ * @property: Property to remove
+ *
+ * Note memory for @property is released as well so it is not allowed to
+ * touch the object after call to this function.
+ */
+void tb_property_remove(struct tb_property *property)
+{
+	list_del(&property->list);
+	kfree(property);
+}
+EXPORT_SYMBOL_GPL(tb_property_remove);
+
+/**
+ * tb_property_find() - Find a property from a directory
+ * @dir: Directory where the property is searched
+ * @key: Key to look for
+ * @type: Type of the property
+ *
+ * Finds and returns property from the given directory. Does not recurse
+ * into sub-directories. Returns %NULL if the property was not found.
+ */
+struct tb_property *tb_property_find(struct tb_property_dir *dir,
+	const char *key, enum tb_property_type type)
+{
+	struct tb_property *property;
+
+	list_for_each_entry(property, &dir->properties, list) {
+		if (property->type == type && !strcmp(property->key, key))
+			return property;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_property_find);
+
+/**
+ * tb_property_get_next() - Get next property from directory
+ * @dir: Directory holding properties
+ * @prev: Previous property in the directory (%NULL returns the first)
+ */
+struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
+					 struct tb_property *prev)
+{
+	if (prev) {
+		if (list_is_last(&prev->list, &dir->properties))
+			return NULL;
+		return list_next_entry(prev, list);
+	}
+	return list_first_entry_or_null(&dir->properties, struct tb_property,
+					list);
+}
+EXPORT_SYMBOL_GPL(tb_property_get_next);
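+
+/*
+ * Illustrative sketch: walking all properties of a directory with
+ * tb_property_get_next() (locking omitted).
+ *
+ *	struct tb_property *p = NULL;
+ *
+ *	while ((p = tb_property_get_next(dir, p)) != NULL) {
+ *		if (p->type == TB_PROPERTY_TYPE_TEXT)
+ *			pr_info("%s: %s\n", p->key, p->value.text);
+ *	}
+ */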
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
new file mode 100644
index 0000000..dd9ae6f
--- /dev/null
+++ b/drivers/thunderbolt/switch.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - switch/port utility functions
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "tb.h"
+
+/* Switch authorization from userspace is serialized by this lock */
+static DEFINE_MUTEX(switch_lock);
+
+/* Switch NVM support */
+
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_CSS			0x10
+#define NVM_FLASH_SIZE		0x45
+
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+	struct list_head list;
+	uuid_t uuid;
+	u32 status;
+};
+
+/*
+ * Hold NVM authentication failure status for each switch. This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	list_for_each_entry(st, &nvm_auth_status_cache, list) {
+		if (uuid_equal(&st->uuid, sw->uuid))
+			return st;
+	}
+
+	return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	mutex_unlock(&nvm_auth_status_lock);
+
+	*status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+	struct nvm_auth_status *st;
+
+	if (WARN_ON(!sw->uuid))
+		return;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+
+	if (!st) {
+		st = kzalloc(sizeof(*st), GFP_KERNEL);
+		if (!st)
+			goto unlock;
+
+		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+		INIT_LIST_HEAD(&st->list);
+		list_add_tail(&st->list, &nvm_auth_status_cache);
+	}
+
+	st->status = status;
+unlock:
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	if (st) {
+		list_del(&st->list);
+		kfree(st);
+	}
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = sw->nvm->buf;
+	u16 ds_size;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	image_size = sw->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	if (!sw->safe_mode) {
+		u16 device_id;
+
+		/*
+		 * Make sure the device ID in the image matches the one
+		 * we read from the switch config space.
+		 */
+		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+		if (device_id != sw->config.device_id)
+			return -EINVAL;
+
+		if (sw->generation < 3) {
+			/* Write CSS headers first */
+			ret = dma_port_flash_write(sw->dma_port,
+				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+				DMA_PORT_CSS_MAX_SIZE);
+			if (ret)
+				return ret;
+		}
+
+		/* Skip headers in the image */
+		buf += hdr_size;
+		image_size -= hdr_size;
+	}
+
+	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+	int ret;
+
+	/*
+	 * Root switch NVM upgrade requires that we disconnect the
+	 * existing paths first (in case it is not in safe mode
+	 * already).
+	 */
+	if (!sw->safe_mode) {
+		ret = tb_domain_disconnect_all_paths(sw->tb);
+		if (ret)
+			return ret;
+		/*
+		 * The host controller goes away pretty soon after this if
+		 * everything goes well so getting timeout is expected.
+		 */
+		ret = dma_port_flash_update_auth(sw->dma_port);
+		return ret == -ETIMEDOUT ? 0 : ret;
+	}
+
+	/*
+	 * From safe mode we can get out by just power cycling the
+	 * switch.
+	 */
+	dma_port_power_cycle(sw->dma_port);
+	return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+	int ret, retries = 10;
+
+	ret = dma_port_flash_update_auth(sw->dma_port);
+	if (ret && ret != -ETIMEDOUT)
+		return ret;
+
+	/*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get a timeout for a while). Once
+	 * we get a response the device needs to be power cycled in order
+	 * for the new NVM to be taken into use.
+	 */
+	do {
+		u32 status;
+
+		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+		if (ret < 0 && ret != -ETIMEDOUT)
+			return ret;
+		if (ret > 0) {
+			if (status) {
+				tb_sw_warn(sw, "failed to authenticate NVM\n");
+				nvm_set_auth_status(sw, status);
+			}
+
+			tb_sw_info(sw, "power cycling the switch now\n");
+			dma_port_power_cycle(sw->dma_port);
+			return 0;
+		}
+
+		msleep(500);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret;
+
+	pm_runtime_get_sync(&sw->dev);
+	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+
+	return ret;
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/*
+	 * Since writing the NVM image might require some special steps,
+	 * for example when CSS headers are written, we cache the image
+	 * locally here and handle the special cases when the user asks
+	 * us to authenticate the image.
+	 */
+	if (!sw->nvm->buf) {
+		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!sw->nvm->buf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	sw->nvm->buf_data_size = offset + bytes;
+	memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+					   size_t size, bool active)
+{
+	struct nvmem_config config;
+
+	memset(&config, 0, sizeof(config));
+
+	if (active) {
+		config.name = "nvm_active";
+		config.reg_read = tb_switch_nvm_read;
+		config.read_only = true;
+	} else {
+		config.name = "nvm_non_active";
+		config.reg_write = tb_switch_nvm_write;
+		config.root_only = true;
+	}
+
+	config.id = id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = &sw->dev;
+	config.owner = THIS_MODULE;
+	config.priv = sw;
+
+	return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+	struct nvmem_device *nvm_dev;
+	struct tb_switch_nvm *nvm;
+	u32 val;
+	int ret;
+
+	if (!sw->dma_port)
+		return 0;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return -ENOMEM;
+
+	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+	/*
+	 * If the switch is in safe-mode the only accessible portion of
+	 * the NVM is the non-active one where userspace is expected to
+	 * write new functional NVM.
+	 */
+	if (!sw->safe_mode) {
+		u32 nvm_size, hdr_size;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
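+		/*
+		 * Worked example (assuming the low bits of the register
+		 * encode the flash size in Mbit as a power of two):
+		 * val & 7 == 2 would give (SZ_1M << 2) / 8 = 512 KB of
+		 * flash. After subtracting the header the rest is split
+		 * in half between the active and the pending (non-active)
+		 * NVM images.
+		 */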
+		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+		nvm_size = (SZ_1M << (val & 7)) / 8;
+		nvm_size = (nvm_size - hdr_size) / 2;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		nvm->major = val >> 16;
+		nvm->minor = val >> 8;
+
+		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_ida;
+		}
+		nvm->active = nvm_dev;
+	}
+
+	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+	if (IS_ERR(nvm_dev)) {
+		ret = PTR_ERR(nvm_dev);
+		goto err_nvm_active;
+	}
+	nvm->non_active = nvm_dev;
+
+	mutex_lock(&switch_lock);
+	sw->nvm = nvm;
+	mutex_unlock(&switch_lock);
+
+	return 0;
+
+err_nvm_active:
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+err_ida:
+	ida_simple_remove(&nvm_ida, nvm->id);
+	kfree(nvm);
+
+	return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+	struct tb_switch_nvm *nvm;
+
+	mutex_lock(&switch_lock);
+	nvm = sw->nvm;
+	sw->nvm = NULL;
+	mutex_unlock(&switch_lock);
+
+	if (!nvm)
+		return;
+
+	/* Remove authentication status in case the switch is unplugged */
+	if (!nvm->authenticating)
+		nvm_clear_auth_status(sw);
+
+	nvmem_unregister(nvm->non_active);
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+	ida_simple_remove(&nvm_ida, nvm->id);
+	vfree(nvm->buf);
+	kfree(nvm);
+}
+
+/* port utility functions */
+
+static const char *tb_port_type(struct tb_regs_port_header *port)
+{
+	switch (port->type >> 16) {
+	case 0:
+		switch ((u8) port->type) {
+		case 0:
+			return "Inactive";
+		case 1:
+			return "Port";
+		case 2:
+			return "NHI";
+		default:
+			return "unknown";
+		}
+	case 0x2:
+		return "Ethernet";
+	case 0x8:
+		return "SATA";
+	case 0xe:
+		return "DP/HDMI";
+	case 0x10:
+		return "PCIe";
+	case 0x20:
+		return "USB";
+	default:
+		return "unknown";
+	}
+}
+
+static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
+{
+	tb_info(tb,
+		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+		port->port_number, port->vendor_id, port->device_id,
+		port->revision, port->thunderbolt_version, tb_port_type(port),
+		port->type);
+	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
+		port->max_in_hop_id, port->max_out_hop_id);
+	tb_info(tb, "  Max counters: %d\n", port->max_counters);
+	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
+}
+
+/**
+ * tb_port_state() - get connectedness state of a port
+ *
+ * The port must have a TB_CAP_PHY (i.e. it should be a real port).
+ *
+ * Return: Returns an enum tb_port_state on success or an error code on failure.
+ */
+static int tb_port_state(struct tb_port *port)
+{
+	struct tb_cap_phy phy;
+	int res;
+	if (port->cap_phy == 0) {
+		tb_port_WARN(port, "does not have a PHY\n");
+		return -EINVAL;
+	}
+	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
+	if (res)
+		return res;
+	return phy.state;
+}
+
+/**
+ * tb_wait_for_port() - wait for a port to become ready
+ *
+ * Wait up to 1 second for a port to reach state TB_PORT_UP. If
+ * wait_if_unplugged is set then we also wait if the port is in state
+ * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
+ * switch resume). Otherwise we only wait if a device is registered but the link
+ * has not yet been established.
+ *
+ * Return: Returns an error code on failure. Returns 0 if the port is not
+ * connected or failed to reach state TB_PORT_UP within one second. Returns 1
+ * if the port is connected and in state TB_PORT_UP.
+ */
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
+{
+	int retries = 10;
+	int state;
+	if (!port->cap_phy) {
+		tb_port_WARN(port, "does not have PHY\n");
+		return -EINVAL;
+	}
+	if (tb_is_upstream_port(port)) {
+		tb_port_WARN(port, "is the upstream port\n");
+		return -EINVAL;
+	}
+
+	while (retries--) {
+		state = tb_port_state(port);
+		if (state < 0)
+			return state;
+		if (state == TB_PORT_DISABLED) {
+			tb_port_info(port, "is disabled (state: 0)\n");
+			return 0;
+		}
+		if (state == TB_PORT_UNPLUGGED) {
+			if (wait_if_unplugged) {
+				/* used during resume */
+				tb_port_info(port,
+					     "is unplugged (state: 7), retrying...\n");
+				msleep(100);
+				continue;
+			}
+			tb_port_info(port, "is unplugged (state: 7)\n");
+			return 0;
+		}
+		if (state == TB_PORT_UP) {
+			tb_port_info(port,
+				     "is connected, link is up (state: 2)\n");
+			return 1;
+		}
+
+		/*
+		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
+		 * time.
+		 */
+		tb_port_info(port,
+			     "is connected, link is not up (state: %d), retrying...\n",
+			     state);
+		msleep(100);
+	}
+	tb_port_warn(port,
+		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
+	return 0;
+}
+
+/**
+ * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
+ *
+ * Change the number of NFC credits allocated to @port by @credits. To remove
+ * NFC credits pass a negative amount of credits.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_add_nfc_credits(struct tb_port *port, int credits)
+{
+	if (credits == 0)
+		return 0;
+	tb_port_info(port,
+		     "adding %#x NFC credits (%#x -> %#x)\n",
+		     credits,
+		     port->config.nfc_credits,
+		     port->config.nfc_credits + credits);
+	port->config.nfc_credits += credits;
+	return tb_port_write(port, &port->config.nfc_credits,
+			     TB_CFG_PORT, 4, 1);
+}
+
+/**
+ * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_clear_counter(struct tb_port *port, int counter)
+{
+	u32 zero[3] = { 0, 0, 0 };
+	tb_port_info(port, "clearing counter %d\n", counter);
+	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
+}
+
+/**
+ * tb_init_port() - initialize a port
+ *
+ * This is a helper method for tb_switch_alloc. Does not check or initialize
+ * any downstream switches.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_init_port(struct tb_port *port)
+{
+	int res;
+	int cap;
+
+	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
+	if (res)
+		return res;
+
+	/* Port 0 is the switch itself and has no PHY. */
+	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
+		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
+
+		if (cap > 0)
+			port->cap_phy = cap;
+		else
+			tb_port_WARN(port, "non switch port without a PHY\n");
+	}
+
+	tb_dump_port(port->sw->tb, &port->config);
+
+	/* TODO: Read dual link port, DP port and more from EEPROM. */
+	return 0;
+
+}
+
+/* switch utility functions */
+
+static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+{
+	tb_info(tb,
+		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+		sw->vendor_id, sw->device_id, sw->revision,
+		sw->thunderbolt_version);
+	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
+	tb_info(tb, "  Config:\n");
+	tb_info(tb,
+		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
+		sw->upstream_port_number, sw->depth,
+		(((u64) sw->route_hi) << 32) | sw->route_lo,
+		sw->enabled, sw->plug_events_delay);
+	tb_info(tb,
+		"   unknown1: %#x unknown4: %#x\n",
+		sw->__unknown1, sw->__unknown4);
+}
+
+/**
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_reset(struct tb *tb, u64 route)
+{
+	struct tb_cfg_result res;
+	struct tb_regs_switch_header header = {
+		header.route_hi = route >> 32,
+		header.route_lo = route,
+		header.enabled = true,
+	};
+	tb_info(tb, "resetting switch at %llx\n", route);
+	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
+			0, 2, 2, 2);
+	if (res.err)
+		return res.err;
+	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
+	if (res.err > 0)
+		return -EIO;
+	return res.err;
+}
+
+struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
+{
+	/*
+	 * Routes use a stride of 8 bits, even though a port index has
+	 * 6 bits at most.
+	 */
+	u8 next_port = route;
+	if (route == 0)
+		return sw;
+	if (next_port > sw->config.max_port_number)
+		return NULL;
+	if (tb_is_upstream_port(&sw->ports[next_port]))
+		return NULL;
+	if (!sw->ports[next_port].remote)
+		return NULL;
+	return get_switch_at_route(sw->ports[next_port].remote->sw,
+				   route >> TB_ROUTE_SHIFT);
+}
+
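+/*
+ * Worked example: with route 0x0301 the low byte selects port 1 of @sw
+ * and the next byte selects port 3 of the switch behind that port, so
+ * get_switch_at_route(root, 0x0301) returns the switch connected to
+ * port 3 of the switch on root port 1 (or NULL if anything along the
+ * way is not there).
+ */
+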
+/**
+ * tb_plug_events_active() - enable/disable plug events on a switch
+ *
+ * Also configures a sane plug_events_delay of 255ms.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_plug_events_active(struct tb_switch *sw, bool active)
+{
+	u32 data;
+	int res;
+
+	if (!sw->config.enabled)
+		return 0;
+
+	sw->config.plug_events_delay = 0xff;
+	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
+	if (res)
+		return res;
+
+	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
+	if (res)
+		return res;
+
+	if (active) {
+		data = data & 0xFFFFFF83;
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+			break;
+		default:
+			data |= 4;
+		}
+	} else {
+		data = data | 0x7c;
+	}
+	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
+			   sw->cap_plug_events + 1, 1);
+}
+
+static ssize_t authorized_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->authorized);
+}
+
+static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+{
+	int ret = -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized)
+		goto unlock;
+
+	/*
+	 * Make sure there is no PCIe rescan ongoing when a new PCIe
+	 * tunnel is created. Otherwise the PCIe rescan code might find
+	 * the new tunnel too early.
+	 */
+	pci_lock_rescan_remove();
+	pm_runtime_get_sync(&sw->dev);
+
+	switch (val) {
+	/* Approve switch */
+	case 1:
+		if (sw->key)
+			ret = tb_domain_approve_switch_key(sw->tb, sw);
+		else
+			ret = tb_domain_approve_switch(sw->tb, sw);
+		break;
+
+	/* Challenge switch */
+	case 2:
+		if (sw->key)
+			ret = tb_domain_challenge_switch_key(sw->tb, sw);
+		break;
+
+	default:
+		break;
+	}
+
+	pm_runtime_mark_last_busy(&sw->dev);
+	pm_runtime_put_autosuspend(&sw->dev);
+	pci_unlock_rescan_remove();
+
+	if (!ret) {
+		sw->authorized = val;
+		/* Notify status change to the userspace */
+		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+	}
+
+unlock:
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t authorized_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	unsigned int val;
+	ssize_t ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+	if (val > 2)
+		return -EINVAL;
+
+	ret = tb_switch_set_authorized(sw, val);
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(authorized);
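+
+/*
+ * The attribute above is what userspace (for example the bolt daemon)
+ * writes to authorize a device:
+ *
+ *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
+ *
+ * The device name "0-1" is just an example. Writing 2 instead of 1
+ * requests the challenge flow when a key has been set.
+ */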
+
+static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->boot);
+}
+static DEVICE_ATTR_RO(boot);
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%#x\n", sw->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	ssize_t ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->key)
+		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+	else
+		ret = sprintf(buf, "\n");
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u8 key[TB_SWITCH_KEY_SIZE];
+	ssize_t ret = count;
+	bool clear = false;
+
+	if (!strcmp(buf, "\n"))
+		clear = true;
+	else if (hex2bin(key, buf, sizeof(key)))
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized) {
+		ret = -EBUSY;
+	} else {
+		kfree(sw->key);
+		if (clear) {
+			sw->key = NULL;
+		} else {
+			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+			if (!sw->key)
+				ret = -ENOMEM;
+		}
+	}
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+static DEVICE_ATTR(key, 0600, key_show, key_store);
+
+static void nvm_authenticate_start(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	/*
+	 * During host router NVM upgrade we should not allow root port to
+	 * go into D3cold because some root ports cannot trigger PME
+	 * by themselves. To be on the safe side keep the root port in D0 during
+	 * the whole upgrade process.
+	 */
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_put(&root_port->dev);
+}
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u32 status;
+
+	nvm_get_auth_status(sw, &status);
+	return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	bool val;
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/* If NVMem devices are not yet added */
+	if (!sw->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear the authentication status */
+	nvm_clear_auth_status(sw);
+
+	if (val) {
+		if (!sw->nvm->buf) {
+			ret = -EINVAL;
+			goto exit_unlock;
+		}
+
+		pm_runtime_get_sync(&sw->dev);
+		ret = nvm_validate_and_write(sw);
+		if (ret) {
+			pm_runtime_mark_last_busy(&sw->dev);
+			pm_runtime_put_autosuspend(&sw->dev);
+			goto exit_unlock;
+		}
+
+		sw->nvm->authenticating = true;
+
+		if (!tb_route(sw)) {
+			/*
+			 * Keep root port from suspending as long as the
+			 * NVM upgrade process is running.
+			 */
+			nvm_authenticate_start(sw);
+			ret = nvm_authenticate_host(sw);
+			if (ret)
+				nvm_authenticate_complete(sw);
+		} else {
+			ret = nvm_authenticate_device(sw);
+		}
+		pm_runtime_mark_last_busy(&sw->dev);
+		pm_runtime_put_autosuspend(&sw->dev);
+	}
+
+exit_unlock:
+	mutex_unlock(&switch_lock);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
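+
+/*
+ * Illustrative NVM upgrade flow from userspace (the device name and
+ * image path are examples):
+ *
+ *	# dd if=firmware.bin of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
+ *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
+ *
+ * Writing 1 validates the cached image, writes it to the non-active
+ * NVM and starts the authentication sequence implemented above.
+ */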
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->safe_mode)
+		ret = -ENODATA;
+	else if (!sw->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%#x\n", sw->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%pUb\n", sw->uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *switch_attrs[] = {
+	&dev_attr_authorized.attr,
+	&dev_attr_boot.attr,
+	&dev_attr_device.attr,
+	&dev_attr_device_name.attr,
+	&dev_attr_key.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_vendor_name.attr,
+	&dev_attr_unique_id.attr,
+	NULL,
+};
+
+static umode_t switch_attr_is_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	if (attr == &dev_attr_key.attr) {
+		if (tb_route(sw) &&
+		    sw->tb->security_level == TB_SECURITY_SECURE &&
+		    sw->security_level == TB_SECURITY_SECURE)
+			return attr->mode;
+		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate.attr ||
+		   attr == &dev_attr_nvm_version.attr) {
+		if (sw->dma_port)
+			return attr->mode;
+		return 0;
+	} else if (attr == &dev_attr_boot.attr) {
+		if (tb_route(sw))
+			return attr->mode;
+		return 0;
+	}
+
+	return sw->safe_mode ? 0 : attr->mode;
+}
+
+static struct attribute_group switch_group = {
+	.is_visible = switch_attr_is_visible,
+	.attrs = switch_attrs,
+};
+
+static const struct attribute_group *switch_groups[] = {
+	&switch_group,
+	NULL,
+};
+
+static void tb_switch_release(struct device *dev)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	dma_port_free(sw->dma_port);
+
+	kfree(sw->uuid);
+	kfree(sw->device_name);
+	kfree(sw->vendor_name);
+	kfree(sw->ports);
+	kfree(sw->drom);
+	kfree(sw->key);
+	kfree(sw);
+}
+
+/*
+ * Currently only need to provide the callbacks. Everything else is handled
+ * in the connection manager.
+ */
+static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops tb_switch_pm_ops = {
+	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
+			   NULL)
+};
+
+struct device_type tb_switch_type = {
+	.name = "thunderbolt_device",
+	.release = tb_switch_release,
+	.pm = &tb_switch_pm_ops,
+};
+
+static int tb_switch_get_generation(struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
+		return 1;
+
+	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+		return 2;
+
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+		return 3;
+
+	default:
+		/*
+		 * For unknown switches, assume generation 1 to be on the
+		 * safe side.
+		 */
+		tb_sw_warn(sw, "unsupported switch device id %#x\n",
+			   sw->config.device_id);
+		return 1;
+	}
+}
+
+/**
+ * tb_switch_alloc() - allocate a switch
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * Allocates and initializes a switch. Will not upload configuration to
+ * the switch. For that you need to call tb_switch_configure()
+ * separately. The returned switch should be released by calling
+ * tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+				  u64 route)
+{
+	int i;
+	int cap;
+	struct tb_switch *sw;
+	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
+	if (upstream_port < 0)
+		return NULL;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
+		goto err_free_sw_ports;
+
+	tb_info(tb, "current switch config:\n");
+	tb_dump_switch(tb, &sw->config);
+
+	/* configure switch */
+	sw->config.upstream_port_number = upstream_port;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_lo = route;
+	sw->config.route_hi = route >> 32;
+	sw->config.enabled = 0;
+
+	/* initialize ports */
+	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
+				GFP_KERNEL);
+	if (!sw->ports)
+		goto err_free_sw_ports;
+
+	for (i = 0; i <= sw->config.max_port_number; i++) {
+		/* minimum setup for tb_port_find_cap and tb_drom_read to work */
+		sw->ports[i].sw = sw;
+		sw->ports[i].port = i;
+	}
+
+	sw->generation = tb_switch_get_generation(sw);
+
+	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+	if (cap < 0) {
+		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+		goto err_free_sw_ports;
+	}
+	sw->cap_plug_events = cap;
+
+	/* Root switch is always authorized */
+	if (!route)
+		sw->authorized = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+
+err_free_sw_ports:
+	kfree(sw->ports);
+	kfree(sw);
+
+	return NULL;
+}
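+
+/*
+ * Illustrative only (not part of the driver): the typical life cycle of
+ * a switch as used by the connection managers, e.g. tb_scan_port() and
+ * tb_start() in tb.c, with error handling compressed:
+ *
+ *	struct tb_switch *sw;
+ *
+ *	sw = tb_switch_alloc(tb, parent, route);
+ *	if (!sw)
+ *		return;
+ *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
+ *		tb_switch_put(sw);
+ *		return;
+ *	}
+ *	...
+ *	tb_switch_remove(sw);
+ */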
+
+/**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except the DMA configuration port, until it is
+ * flashed with valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+	struct tb_switch *sw;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->safe_mode = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+}
+
+/**
+ * tb_switch_configure() - Uploads configuration to the switch
+ * @sw: Switch to configure
+ *
+ * Call this function before the switch is added to the system. It will
+ * upload the configuration to the switch and make it available for the
+ * connection manager to use.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_configure(struct tb_switch *sw)
+{
+	struct tb *tb = sw->tb;
+	u64 route;
+	int ret;
+
+	route = tb_route(sw);
+	tb_info(tb,
+		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
+		route, tb_route_length(route), sw->config.upstream_port_number);
+
+	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+			   sw->config.vendor_id);
+
+	sw->config.enabled = 1;
+
+	/* upload configuration */
+	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
+	if (ret)
+		return ret;
+
+	return tb_plug_events_active(sw, true);
+}
+
+static void tb_switch_set_uuid(struct tb_switch *sw)
+{
+	u32 uuid[4];
+	int cap;
+
+	if (sw->uuid)
+		return;
+
+	/*
+	 * The newer controllers include a fused UUID as part of the link
+	 * controller specific registers.
+	 */
+	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+	if (cap > 0) {
+		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
+	} else {
+		/*
+		 * The ICM generates the UUID based on the UID and fills
+		 * the upper two words with ones. This is not strictly
+		 * following the UUID format, but we want to be compatible
+		 * with it so we do the same here.
+		 */
+		uuid[0] = sw->uid & 0xffffffff;
+		uuid[1] = (sw->uid >> 32) & 0xffffffff;
+		uuid[2] = 0xffffffff;
+		uuid[3] = 0xffffffff;
+	}
+
+	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static int tb_switch_add_dma_port(struct tb_switch *sw)
+{
+	u32 status;
+	int ret;
+
+	switch (sw->generation) {
+	case 3:
+		break;
+
+	case 2:
+		/* Only root switch can be upgraded */
+		if (tb_route(sw))
+			return 0;
+		break;
+
+	default:
+		/*
+		 * DMA port is the only thing available when the switch
+		 * is in safe mode.
+		 */
+		if (!sw->safe_mode)
+			return 0;
+		break;
+	}
+
+	if (sw->no_nvm_upgrade)
+		return 0;
+
+	sw->dma_port = dma_port_alloc(sw);
+	if (!sw->dma_port)
+		return 0;
+
+	/*
+	 * Check the status of the previous flash authentication. If there
+	 * is one, we need to power cycle the switch in any case to make
+	 * it functional again.
+	 */
+	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+	if (ret <= 0)
+		return ret;
+
+	/* Now we can allow root port to suspend again */
+	if (!tb_route(sw))
+		nvm_authenticate_complete(sw);
+
+	if (status) {
+		tb_sw_info(sw, "switch flash authentication failed\n");
+		tb_switch_set_uuid(sw);
+		nvm_set_auth_status(sw, status);
+	}
+
+	tb_sw_info(sw, "power cycling the switch now\n");
+	dma_port_power_cycle(sw->dma_port);
+
+	/*
+	 * We return an error here, which causes adding the switch to
+	 * fail. It should appear back after the power cycle is complete.
+	 */
+	return -ESHUTDOWN;
+}
+
+/**
+ * tb_switch_add() - Add a switch to the domain
+ * @sw: Switch to add
+ *
+ * This is the last step in adding a switch to the domain. It will read
+ * identification information from the DROM and initialize the ports so
+ * that they can be used to connect other switches. The switch will be
+ * exposed to userspace when this function successfully returns. To
+ * remove and release the switch, call tb_switch_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_add(struct tb_switch *sw)
+{
+	int i, ret;
+
+	/*
+	 * Initialize the DMA control port now, before we read the DROM.
+	 * Recent host controllers have a more complete DROM in NVM that
+	 * includes vendor and model identification strings, which we then
+	 * expose to userspace. The NVM can be accessed through the DMA
+	 * configuration based mailbox.
+	 */
+	ret = tb_switch_add_dma_port(sw);
+	if (ret)
+		return ret;
+
+	if (!sw->safe_mode) {
+		/* read drom */
+		ret = tb_drom_read(sw);
+		if (ret) {
+			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			return ret;
+		}
+		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+
+		tb_switch_set_uuid(sw);
+
+		for (i = 0; i <= sw->config.max_port_number; i++) {
+			if (sw->ports[i].disabled) {
+				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				continue;
+			}
+			ret = tb_init_port(&sw->ports[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = device_add(&sw->dev);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_nvm_add(sw);
+	if (ret) {
+		device_del(&sw->dev);
+		return ret;
+	}
+
+	pm_runtime_set_active(&sw->dev);
+	if (sw->rpm) {
+		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&sw->dev);
+		pm_runtime_mark_last_busy(&sw->dev);
+		pm_runtime_enable(&sw->dev);
+		pm_request_autosuspend(&sw->dev);
+	}
+
+	return 0;
+}
+
+/**
+ * tb_switch_remove() - Remove and release a switch
+ * @sw: Switch to remove
+ *
+ * This will remove the switch from the domain and release it after the
+ * reference count drops to zero. If there are switches connected below
+ * this switch, they will be removed as well.
+ */
+void tb_switch_remove(struct tb_switch *sw)
+{
+	int i;
+
+	if (sw->rpm) {
+		pm_runtime_get_sync(&sw->dev);
+		pm_runtime_disable(&sw->dev);
+	}
+
+	/* port 0 is the switch itself and never has a remote */
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (tb_is_upstream_port(&sw->ports[i]))
+			continue;
+		if (sw->ports[i].remote)
+			tb_switch_remove(sw->ports[i].remote->sw);
+		sw->ports[i].remote = NULL;
+		if (sw->ports[i].xdomain)
+			tb_xdomain_remove(sw->ports[i].xdomain);
+		sw->ports[i].xdomain = NULL;
+	}
+
+	if (!sw->is_unplugged)
+		tb_plug_events_active(sw, false);
+
+	tb_switch_nvm_remove(sw);
+	device_unregister(&sw->dev);
+}
+
+/**
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
+ */
+void tb_sw_set_unplugged(struct tb_switch *sw)
+{
+	int i;
+	if (sw == sw->tb->root_switch) {
+		tb_sw_WARN(sw, "cannot unplug root switch\n");
+		return;
+	}
+	if (sw->is_unplugged) {
+		tb_sw_WARN(sw, "is_unplugged already set\n");
+		return;
+	}
+	sw->is_unplugged = true;
+	for (i = 0; i <= sw->config.max_port_number; i++) {
+		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+			tb_sw_set_unplugged(sw->ports[i].remote->sw);
+	}
+}
+
+int tb_switch_resume(struct tb_switch *sw)
+{
+	int i, err;
+	tb_sw_info(sw, "resuming switch\n");
+
+	/*
+	 * Check the UID of the connected switches, except for the root
+	 * switch, which we assume cannot be removed.
+	 */
+	if (tb_route(sw)) {
+		u64 uid;
+
+		err = tb_drom_read_uid_only(sw, &uid);
+		if (err) {
+			tb_sw_warn(sw, "uid read failed\n");
+			return err;
+		}
+		if (sw->uid != uid) {
+			tb_sw_info(sw,
+				"changed while suspended (uid %#llx -> %#llx)\n",
+				sw->uid, uid);
+			return -ENODEV;
+		}
+	}
+
+	/* upload configuration */
+	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
+	if (err)
+		return err;
+
+	err = tb_plug_events_active(sw, true);
+	if (err)
+		return err;
+
+	/* check for surviving downstream switches */
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+		if (tb_wait_for_port(port, true) <= 0
+			|| tb_switch_resume(port->remote->sw)) {
+			tb_port_warn(port,
+				     "lost during suspend, disconnecting\n");
+			tb_sw_set_unplugged(port->remote->sw);
+		}
+	}
+	return 0;
+}
+
+void tb_switch_suspend(struct tb_switch *sw)
+{
+	int i, err;
+	err = tb_plug_events_active(sw, false);
+	if (err)
+		return;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+			tb_switch_suspend(sw->ports[i].remote->sw);
+	}
+	/*
+	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
+	 * effect?
+	 */
+}
+
+struct tb_sw_lookup {
+	struct tb *tb;
+	u8 link;
+	u8 depth;
+	const uuid_t *uuid;
+	u64 route;
+};
+
+static int tb_switch_match(struct device *dev, void *data)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	struct tb_sw_lookup *lookup = data;
+
+	if (!sw)
+		return 0;
+	if (sw->tb != lookup->tb)
+		return 0;
+
+	if (lookup->uuid)
+		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
+
+	if (lookup->route) {
+		return sw->config.route_lo == lower_32_bits(lookup->route) &&
+		       sw->config.route_hi == upper_32_bits(lookup->route);
+	}
+
+	/* Root switch is matched only by depth */
+	if (!lookup->depth)
+		return !sw->depth;
+
+	return sw->link == lookup->link && sw->depth == lookup->depth;
+}
+
+/**
+ * tb_switch_find_by_link_depth() - Find switch by link and depth
+ * @tb: Domain the switch belongs to
+ * @link: Link number the switch is connected to
+ * @depth: Depth of the switch in the link
+ *
+ * The returned switch has its reference count increased, so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.link = link;
+	lookup.depth = depth;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_switch_find_by_uuid() - Find switch by UUID
+ * @tb: Domain the switch belongs to
+ * @uuid: UUID to look for
+ *
+ * The returned switch has its reference count increased, so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.uuid = uuid;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_switch_find_by_route() - Find switch by route string
+ * @tb: Domain the switch belongs to
+ * @route: Route string to look for
+ *
+ * The returned switch has its reference count increased, so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	if (!route)
+		return tb_switch_get(tb->root_switch);
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.route = route;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
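+
+/*
+ * Illustrative only (not part of the driver): the lookup helpers above
+ * return the switch with an extra reference held, so a caller is
+ * expected to do something like:
+ *
+ *	struct tb_switch *sw = tb_switch_find_by_route(tb, route);
+ *
+ *	if (sw) {
+ *		...
+ *		tb_switch_put(sw);
+ *	}
+ */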
+
+void tb_switch_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
new file mode 100644
index 0000000..1424581
--- /dev/null
+++ b/drivers/thunderbolt/tb.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_data/x86/apple.h>
+
+#include "tb.h"
+#include "tb_regs.h"
+#include "tunnel_pci.h"
+
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @hotplug_active: tb_handle_hotplug will stop processing plug
+ *		    events and exit if this is not set (it needs to
+ *		    acquire the lock one more time). Used to drain the
+ *		    wq after cfg has been paused.
+ */
+struct tb_cm {
+	struct list_head tunnel_list;
+	bool hotplug_active;
+};
+
+/* enumeration & hot plug handling */
+
+
+static void tb_scan_port(struct tb_port *port);
+
+/**
+ * tb_scan_switch() - scan for and initialize downstream switches
+ */
+static void tb_scan_switch(struct tb_switch *sw)
+{
+	int i;
+	for (i = 1; i <= sw->config.max_port_number; i++)
+		tb_scan_port(&sw->ports[i]);
+}
+
+/**
+ * tb_scan_port() - check for and initialize switches below port
+ */
+static void tb_scan_port(struct tb_port *port)
+{
+	struct tb_switch *sw;
+	if (tb_is_upstream_port(port))
+		return;
+	if (port->config.type != TB_TYPE_PORT)
+		return;
+	if (port->dual_link_port && port->link_nr)
+		return; /*
+			 * Downstream switch is reachable through two ports.
+			 * Only scan on the primary port (link_nr == 0).
+			 */
+	if (tb_wait_for_port(port, false) <= 0)
+		return;
+	if (port->remote) {
+		tb_port_WARN(port, "port already has a remote!\n");
+		return;
+	}
+	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+			     tb_downstream_route(port));
+	if (!sw)
+		return;
+
+	if (tb_switch_configure(sw)) {
+		tb_switch_put(sw);
+		return;
+	}
+
+	sw->authorized = true;
+
+	if (tb_switch_add(sw)) {
+		tb_switch_put(sw);
+		return;
+	}
+
+	port->remote = tb_upstream_port(sw);
+	tb_upstream_port(sw)->remote = port;
+	tb_scan_switch(sw);
+}
+
+/**
+ * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
+ */
+static void tb_free_invalid_tunnels(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_pci_tunnel *tunnel;
+	struct tb_pci_tunnel *n;
+
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		if (tb_pci_is_invalid(tunnel)) {
+			tb_pci_deactivate(tunnel);
+			list_del(&tunnel->list);
+			tb_pci_free(tunnel);
+		}
+	}
+}
+
+/**
+ * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
+ */
+static void tb_free_unplugged_children(struct tb_switch *sw)
+{
+	int i;
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+		if (port->remote->sw->is_unplugged) {
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else {
+			tb_free_unplugged_children(port->remote->sw);
+		}
+	}
+}
+
+
+/**
+ * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
+ */
+static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+{
+	int i;
+	for (i = 1; i <= sw->config.max_port_number; i++)
+		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+			return &sw->ports[i];
+	return NULL;
+}
+
+/**
+ * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
+ */
+static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
+{
+	int i;
+	int cap;
+	int res;
+	int data;
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (tb_is_upstream_port(&sw->ports[i]))
+			continue;
+		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
+			continue;
+		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
+		if (cap < 0)
+			continue;
+		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
+		if (res < 0)
+			continue;
+		if (data & 0x80000000)
+			continue;
+		return &sw->ports[i];
+	}
+	return NULL;
+}
+
+/**
+ * tb_activate_pcie_devices() - scan for and activate PCIe devices
+ *
+ * This method is somewhat ad hoc. For now it only supports one device
+ * per port and only devices at depth 1.
+ */
+static void tb_activate_pcie_devices(struct tb *tb)
+{
+	int i;
+	int cap;
+	u32 data;
+	struct tb_switch *sw;
+	struct tb_port *up_port;
+	struct tb_port *down_port;
+	struct tb_pci_tunnel *tunnel;
+	struct tb_cm *tcm = tb_priv(tb);
+
+	/* scan for PCIe devices at depth 1 */
+	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
+		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
+			continue;
+		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
+			continue;
+		if (!tb->root_switch->ports[i].remote)
+			continue;
+		sw = tb->root_switch->ports[i].remote->sw;
+		up_port = tb_find_pci_up_port(sw);
+		if (!up_port) {
+			tb_sw_info(sw, "no PCIe devices found, aborting\n");
+			continue;
+		}
+
+		/* check whether port is already activated */
+		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
+		if (cap < 0)
+			continue;
+		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
+			continue;
+		if (data & 0x80000000) {
+			tb_port_info(up_port,
+				     "PCIe port already activated, aborting\n");
+			continue;
+		}
+
+		down_port = tb_find_unused_down_port(tb->root_switch);
+		if (!down_port) {
+			tb_port_info(up_port,
+				     "All PCIe down ports are occupied, aborting\n");
+			continue;
+		}
+		tunnel = tb_pci_alloc(tb, up_port, down_port);
+		if (!tunnel) {
+			tb_port_info(up_port,
+				     "PCIe tunnel allocation failed, aborting\n");
+			continue;
+		}
+
+		if (tb_pci_activate(tunnel)) {
+			tb_port_info(up_port,
+				     "PCIe tunnel activation failed, aborting\n");
+			tb_pci_free(tunnel);
+			continue;
+		}
+
+		list_add(&tunnel->list, &tcm->tunnel_list);
+	}
+}
+
+/* hotplug handling */
+
+struct tb_hotplug_event {
+	struct work_struct work;
+	struct tb *tb;
+	u64 route;
+	u8 port;
+	bool unplug;
+};
+
+/**
+ * tb_handle_hotplug() - handle hotplug event
+ *
+ * Executes on tb->wq.
+ */
+static void tb_handle_hotplug(struct work_struct *work)
+{
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb *tb = ev->tb;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_switch *sw;
+	struct tb_port *port;
+	mutex_lock(&tb->lock);
+	if (!tcm->hotplug_active)
+		goto out; /* during init, suspend or shutdown */
+
+	sw = get_switch_at_route(tb->root_switch, ev->route);
+	if (!sw) {
+		tb_warn(tb,
+			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
+			ev->route, ev->port, ev->unplug);
+		goto out;
+	}
+	if (ev->port > sw->config.max_port_number) {
+		tb_warn(tb,
+			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
+			ev->route, ev->port, ev->unplug);
+		goto out;
+	}
+	port = &sw->ports[ev->port];
+	if (tb_is_upstream_port(port)) {
+		tb_warn(tb,
+			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
+			ev->route, ev->port, ev->unplug);
+		goto out;
+	}
+	if (ev->unplug) {
+		if (port->remote) {
+			tb_port_info(port, "unplugged\n");
+			tb_sw_set_unplugged(port->remote->sw);
+			tb_free_invalid_tunnels(tb);
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else {
+			tb_port_info(port,
+				     "got unplug event for disconnected port, ignoring\n");
+		}
+	} else if (port->remote) {
+		tb_port_info(port,
+			     "got plug event for connected port, ignoring\n");
+	} else {
+		tb_port_info(port, "hotplug: scanning\n");
+		tb_scan_port(port);
+		if (!port->remote) {
+			tb_port_info(port, "hotplug: no switch found\n");
+		} else if (port->remote->sw->config.depth > 1) {
+			tb_sw_warn(port->remote->sw,
+				   "hotplug: chaining not supported\n");
+		} else {
+			tb_sw_info(port->remote->sw,
+				   "hotplug: activating pcie devices\n");
+			tb_activate_pcie_devices(tb);
+		}
+	}
+out:
+	mutex_unlock(&tb->lock);
+	kfree(ev);
+}
+
+/**
+ * tb_handle_event() - callback function for the control channel
+ *
+ * Delegates to tb_handle_hotplug.
+ */
+static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+			    const void *buf, size_t size)
+{
+	const struct cfg_event_pkg *pkg = buf;
+	struct tb_hotplug_event *ev;
+	u64 route;
+
+	if (type != TB_CFG_PKG_EVENT) {
+		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
+		return;
+	}
+
+	route = tb_cfg_get_route(&pkg->header);
+
+	if (tb_cfg_error(tb->ctl, route, pkg->port,
+			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
+			pkg->port);
+	}
+
+	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return;
+	INIT_WORK(&ev->work, tb_handle_hotplug);
+	ev->tb = tb;
+	ev->route = route;
+	ev->port = pkg->port;
+	ev->unplug = pkg->unplug;
+	queue_work(tb->wq, &ev->work);
+}
+
+static void tb_stop(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_pci_tunnel *tunnel;
+	struct tb_pci_tunnel *n;
+
+	/* tunnels are only present after everything has been initialized */
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		tb_pci_deactivate(tunnel);
+		tb_pci_free(tunnel);
+	}
+	tb_switch_remove(tb->root_switch);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+}
+
+static int tb_start(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	int ret;
+
+	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (!tb->root_switch)
+		return -ENOMEM;
+
+	/*
+	 * ICM firmware upgrade needs running firmware, and in native
+	 * mode that is not available, so disable firmware upgrade of
+	 * the root switch.
+	 */
+	tb->root_switch->no_nvm_upgrade = true;
+
+	ret = tb_switch_configure(tb->root_switch);
+	if (ret) {
+		tb_switch_put(tb->root_switch);
+		return ret;
+	}
+
+	/* Announce the switch to the world */
+	ret = tb_switch_add(tb->root_switch);
+	if (ret) {
+		tb_switch_put(tb->root_switch);
+		return ret;
+	}
+
+	/* Full scan to discover devices added before the driver was loaded. */
+	tb_scan_switch(tb->root_switch);
+	tb_activate_pcie_devices(tb);
+
+	/* Allow tb_handle_hotplug to process events */
+	tcm->hotplug_active = true;
+	return 0;
+}
+
+static int tb_suspend_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+
+	tb_info(tb, "suspending...\n");
+	tb_switch_suspend(tb->root_switch);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+	tb_info(tb, "suspend finished\n");
+
+	return 0;
+}
+
+static int tb_resume_noirq(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_pci_tunnel *tunnel, *n;
+
+	tb_info(tb, "resuming...\n");
+
+	/* remove any PCIe devices the firmware might have set up */
+	tb_switch_reset(tb, 0);
+
+	tb_switch_resume(tb->root_switch);
+	tb_free_invalid_tunnels(tb);
+	tb_free_unplugged_children(tb->root_switch);
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+		tb_pci_restart(tunnel);
+	if (!list_empty(&tcm->tunnel_list)) {
+		/*
+		 * The PCIe links need some time to get going.
+		 * 100ms works for me...
+		 */
+		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
+		msleep(100);
+	}
+	/* Allow tb_handle_hotplug to process events */
+	tcm->hotplug_active = true;
+	tb_info(tb, "resume finished\n");
+
+	return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+	.start = tb_start,
+	.stop = tb_stop,
+	.suspend_noirq = tb_suspend_noirq,
+	.resume_noirq = tb_resume_noirq,
+	.handle_event = tb_handle_event,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+	struct tb_cm *tcm;
+	struct tb *tb;
+
+	if (!x86_apple_machine)
+		return NULL;
+
+	tb = tb_domain_alloc(nhi, sizeof(*tcm));
+	if (!tb)
+		return NULL;
+
+	tb->security_level = TB_SECURITY_NONE;
+	tb->cm_ops = &tb_cm_ops;
+
+	tcm = tb_priv(tb);
+	INIT_LIST_HEAD(&tcm->tunnel_list);
+
+	return tb;
+}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
new file mode 100644
index 0000000..5067d69
--- /dev/null
+++ b/drivers/thunderbolt/tb.h
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_H_
+#define TB_H_
+
+#include <linux/nvmem-provider.h>
+#include <linux/pci.h>
+#include <linux/thunderbolt.h>
+#include <linux/uuid.h>
+
+#include "tb_regs.h"
+#include "ctl.h"
+#include "dma_port.h"
+
+/**
+ * struct tb_switch_nvm - Structure holding switch NVM information
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ *	 the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ *		   image
+ * @authenticating: The switch is authenticating the new NVM
+ */
+struct tb_switch_nvm {
+	u8 major;
+	u8 minor;
+	int id;
+	struct nvmem_device *active;
+	struct nvmem_device *non_active;
+	void *buf;
+	size_t buf_data_size;
+	bool authenticating;
+};
+
+#define TB_SWITCH_KEY_SIZE		32
+
+/**
+ * struct tb_switch - a thunderbolt switch
+ * @dev: Device for the switch
+ * @config: Switch configuration
+ * @ports: Ports in this switch
+ * @dma_port: If the switch has port supporting DMA configuration based
+ *	      mailbox this will hold the pointer to that (%NULL
+ *	      otherwise). If set it also means the switch has
+ *	      upgradeable NVM.
+ * @tb: Pointer to the domain the switch belongs to
+ * @uid: Unique ID of the switch
+ * @uuid: UUID of the switch (or %NULL if not supported)
+ * @vendor: Vendor ID of the switch
+ * @device: Device ID of the switch
+ * @vendor_name: Name of the vendor (or %NULL if not known)
+ * @device_name: Name of the device (or %NULL if not known)
+ * @generation: Switch Thunderbolt generation
+ * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @is_unplugged: The switch is going away
+ * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
+ * @boot: Whether the switch was already authorized on boot or not
+ * @rpm: The switch supports runtime PM
+ * @authorized: Whether the switch is authorized by user or policy
+ * @work: Work used to automatically authorize a switch
+ * @security_level: Switch supported security level
+ * @key: Contains the key used to challenge the device or %NULL if not
+ *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
+ * @connection_id: Connection ID used with ICM messaging
+ * @connection_key: Connection key used with ICM messaging
+ * @link: Root switch link this switch is connected to (ICM only)
+ * @depth: Depth in the chain this switch is connected at (ICM only)
+ *
+ * When the switch is being added to or removed from the domain (other
+ * switches) you need to hold the domain lock. For switch authorization
+ * the internal switch_lock is enough.
+ */
+struct tb_switch {
+	struct device dev;
+	struct tb_regs_switch_header config;
+	struct tb_port *ports;
+	struct tb_dma_port *dma_port;
+	struct tb *tb;
+	u64 uid;
+	uuid_t *uuid;
+	u16 vendor;
+	u16 device;
+	const char *vendor_name;
+	const char *device_name;
+	unsigned int generation;
+	int cap_plug_events;
+	bool is_unplugged;
+	u8 *drom;
+	struct tb_switch_nvm *nvm;
+	bool no_nvm_upgrade;
+	bool safe_mode;
+	bool boot;
+	bool rpm;
+	unsigned int authorized;
+	struct work_struct work;
+	enum tb_security_level security_level;
+	u8 *key;
+	u8 connection_id;
+	u8 connection_key;
+	u8 link;
+	u8 depth;
+};
+
+/**
+ * struct tb_port - a thunderbolt port, part of a tb_switch
+ * @config: Cached port configuration read from registers
+ * @sw: Switch the port belongs to
+ * @remote: Remote port (%NULL if not connected)
+ * @xdomain: Remote host (%NULL if not connected)
+ * @cap_phy: Offset to the PHY capability, zero if not found
+ * @port: Port number on switch
+ * @disabled: Disabled by eeprom
+ * @dual_link_port: If the switch is connected using two ports, points
+ *		    to the other port.
+ * @link_nr: Is this the primary (0) or secondary (1) port on the dual link.
+ */
+struct tb_port {
+	struct tb_regs_port_header config;
+	struct tb_switch *sw;
+	struct tb_port *remote;
+	struct tb_xdomain *xdomain;
+	int cap_phy;
+	u8 port;
+	bool disabled;
+	struct tb_port *dual_link_port;
+	u8 link_nr:1;
+};
+
+/**
+ * struct tb_path_hop - routing information for a tb_path
+ *
+ * Hop configuration is always done on the IN port of a switch.
+ * in_port and out_port have to be on the same switch. Packets arriving on
+ * in_port with "hop" = in_hop_index will get routed through out_port. The
+ * next hop to take (on out_port->remote) is determined by next_hop_index.
+ *
+ * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
+ * port.
+ */
+struct tb_path_hop {
+	struct tb_port *in_port;
+	struct tb_port *out_port;
+	int in_hop_index;
+	int in_counter_index; /* write -1 to disable counters for this hop. */
+	int next_hop_index;
+};
+
+/**
+ * enum tb_path_port - path options mask
+ */
+enum tb_path_port {
+	TB_PATH_NONE = 0,
+	TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
+	TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
+	TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
+	TB_PATH_ALL = 7,
+};
+
+/**
+ * struct tb_path - a unidirectional path between two ports
+ *
+ * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
+ * tunnel two paths have to be created between the two PCIe ports.
+ *
+ */
+struct tb_path {
+	struct tb *tb;
+	int nfc_credits; /* non flow controlled credits */
+	enum tb_path_port ingress_shared_buffer;
+	enum tb_path_port egress_shared_buffer;
+	enum tb_path_port ingress_fc_enable;
+	enum tb_path_port egress_fc_enable;
+
+	int priority:3;
+	int weight:4;
+	bool drop_packages;
+	bool activated;
+	struct tb_path_hop *hops;
+	int path_length; /* number of hops */
+};
+
+/**
+ * struct tb_cm_ops - Connection manager specific operations vector
+ * @driver_ready: Called right after control channel is started. Used by
+ *		  ICM to send driver ready message to the firmware.
+ * @start: Starts the domain
+ * @stop: Stops the domain
+ * @suspend_noirq: Connection manager specific suspend_noirq
+ * @resume_noirq: Connection manager specific resume_noirq
+ * @suspend: Connection manager specific suspend
+ * @complete: Connection manager specific complete
+ * @runtime_suspend: Connection manager specific runtime_suspend
+ * @runtime_resume: Connection manager specific runtime_resume
+ * @handle_event: Handle thunderbolt event
+ * @get_boot_acl: Get boot ACL list
+ * @set_boot_acl: Set boot ACL list
+ * @approve_switch: Approve switch
+ * @add_switch_key: Add key to switch
+ * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
+ * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
+ */
+struct tb_cm_ops {
+	int (*driver_ready)(struct tb *tb);
+	int (*start)(struct tb *tb);
+	void (*stop)(struct tb *tb);
+	int (*suspend_noirq)(struct tb *tb);
+	int (*resume_noirq)(struct tb *tb);
+	int (*suspend)(struct tb *tb);
+	void (*complete)(struct tb *tb);
+	int (*runtime_suspend)(struct tb *tb);
+	int (*runtime_resume)(struct tb *tb);
+	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
+			     const void *buf, size_t size);
+	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
+	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
+	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
+	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
+	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
+				    const u8 *challenge, u8 *response);
+	int (*disconnect_pcie_paths)(struct tb *tb);
+	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+};
+
+static inline void *tb_priv(struct tb *tb)
+{
+	return (void *)tb->privdata;
+}
+
+#define TB_AUTOSUSPEND_DELAY		15000 /* ms */
+
+/* helper functions & macros */
+
+/**
+ * tb_upstream_port() - return the upstream port of a switch
+ *
+ * Every switch has an upstream port (for the root switch it is the NHI).
+ *
+ * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
+ * non root switches (on the NHI port remote is always NULL).
+ *
+ * Return: Returns the upstream port of the switch.
+ */
+static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
+{
+	return &sw->ports[sw->config.upstream_port_number];
+}
+
+static inline u64 tb_route(struct tb_switch *sw)
+{
+	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
+}
+
+static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
+{
+	u8 port;
+
+	port = route >> (sw->config.depth * 8);
+	if (WARN_ON(port > sw->config.max_port_number))
+		return NULL;
+	return &sw->ports[port];
+}
+
+static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
+			     enum tb_cfg_space space, u32 offset, u32 length)
+{
+	return tb_cfg_read(sw->tb->ctl,
+			   buffer,
+			   tb_route(sw),
+			   0,
+			   space,
+			   offset,
+			   length);
+}
+
+static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+			      enum tb_cfg_space space, u32 offset, u32 length)
+{
+	return tb_cfg_write(sw->tb->ctl,
+			    buffer,
+			    tb_route(sw),
+			    0,
+			    space,
+			    offset,
+			    length);
+}
+
+static inline int tb_port_read(struct tb_port *port, void *buffer,
+			       enum tb_cfg_space space, u32 offset, u32 length)
+{
+	return tb_cfg_read(port->sw->tb->ctl,
+			   buffer,
+			   tb_route(port->sw),
+			   port->port,
+			   space,
+			   offset,
+			   length);
+}
+
+static inline int tb_port_write(struct tb_port *port, const void *buffer,
+				enum tb_cfg_space space, u32 offset, u32 length)
+{
+	return tb_cfg_write(port->sw->tb->ctl,
+			    buffer,
+			    tb_route(port->sw),
+			    port->port,
+			    space,
+			    offset,
+			    length);
+}
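+
+/*
+ * Illustrative only (not part of the driver): combining the capability
+ * walk from cap.c with the accessors above, assuming the port has been
+ * initialized (struct tb_cap_phy is two dwords long):
+ *
+ *	struct tb_cap_phy phy;
+ *	int cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
+ *
+ *	if (cap > 0 && !tb_port_read(port, &phy, TB_CFG_PORT, cap, 2))
+ *		tb_port_info(port, "PHY state %d\n", phy.state);
+ */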
+
+#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
+
+
+#define __TB_SW_PRINT(level, sw, fmt, arg...)           \
+	do {                                            \
+		struct tb_switch *__sw = (sw);          \
+		level(__sw->tb, "%llx: " fmt,           \
+		      tb_route(__sw), ## arg);          \
+	} while (0)
+#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
+#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
+#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
+
+
+#define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
+	do {                                                            \
+		struct tb_port *__port = (_port);                       \
+		level(__port->sw->tb, "%llx:%x: " fmt,                  \
+		      tb_route(__port->sw), __port->port, ## arg);      \
+	} while (0)
+#define tb_port_WARN(port, fmt, arg...) \
+	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
+#define tb_port_warn(port, fmt, arg...) \
+	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
+#define tb_port_info(port, fmt, arg...) \
+	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+
+struct tb *icm_probe(struct tb_nhi *nhi);
+struct tb *tb_probe(struct tb_nhi *nhi);
+
+extern struct device_type tb_domain_type;
+extern struct device_type tb_switch_type;
+
+int tb_domain_init(void);
+void tb_domain_exit(void);
+void tb_switch_exit(void);
+int tb_xdomain_init(void);
+void tb_xdomain_exit(void);
+
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+int tb_domain_add(struct tb *tb);
+void tb_domain_remove(struct tb *tb);
+int tb_domain_suspend_noirq(struct tb *tb);
+int tb_domain_resume_noirq(struct tb *tb);
+int tb_domain_suspend(struct tb *tb);
+void tb_domain_complete(struct tb *tb);
+int tb_domain_runtime_suspend(struct tb *tb);
+int tb_domain_runtime_resume(struct tb *tb);
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_all_paths(struct tb *tb);
+
+static inline void tb_domain_put(struct tb *tb)
+{
+	put_device(&tb->dev);
+}
+
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+				  u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+			struct device *parent, u64 route);
+int tb_switch_configure(struct tb_switch *sw);
+int tb_switch_add(struct tb_switch *sw);
+void tb_switch_remove(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_reset(struct tb *tb, u64 route);
+void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
+					       u8 depth);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+
+static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
+{
+	if (sw)
+		get_device(&sw->dev);
+	return sw;
+}
+
+static inline void tb_switch_put(struct tb_switch *sw)
+{
+	put_device(&sw->dev);
+}
+
+static inline bool tb_is_switch(const struct device *dev)
+{
+	return dev->type == &tb_switch_type;
+}
+
+static inline struct tb_switch *tb_to_switch(struct device *dev)
+{
+	if (tb_is_switch(dev))
+		return container_of(dev, struct tb_switch, dev);
+	return NULL;
+}
+
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
+int tb_port_add_nfc_credits(struct tb_port *port, int credits);
+int tb_port_clear_counter(struct tb_port *port, int counter);
+
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
+void tb_path_free(struct tb_path *path);
+int tb_path_activate(struct tb_path *path);
+void tb_path_deactivate(struct tb_path *path);
+bool tb_path_is_invalid(struct tb_path *path);
+
+int tb_drom_read(struct tb_switch *sw);
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
+
+
+static inline int tb_route_length(u64 route)
+{
+	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
+}
+
+static inline bool tb_is_upstream_port(struct tb_port *port)
+{
+	return port == tb_upstream_port(port->sw);
+}
+
+/**
+ * tb_downstream_route() - get route to downstream switch
+ *
+ * Port must not be the upstream port (otherwise a loop is created).
+ *
+ * Return: Returns a route to the switch behind @port.
+ */
+static inline u64 tb_downstream_route(struct tb_port *port)
+{
+	return tb_route(port->sw)
+	       | ((u64) port->port << (port->sw->config.depth * 8));
+}
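+
+/*
+ * Worked example: the switch behind port 3 of the root switch has route
+ * 0x3 (tb_route_length() == 1); the switch behind that switch's port 5
+ * has route 0x3 | (5 << 8) == 0x503 (tb_route_length() == 2). Walking
+ * back down, tb_port_at(0x503, root_switch) yields port 3 and
+ * tb_port_at(0x503, <switch at depth 1>) yields port 5, since every
+ * switch consumes TB_ROUTE_SHIFT (8) bits of the route string.
+ */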
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size);
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+				    u64 route, const uuid_t *local_uuid,
+				    const uuid_t *remote_uuid);
+void tb_xdomain_add(struct tb_xdomain *xd);
+void tb_xdomain_remove(struct tb_xdomain *xd);
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+						 u8 depth);
+
+#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
new file mode 100644
index 0000000..2487e16
--- /dev/null
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -0,0 +1,548 @@
+/*
+ * Thunderbolt control channel messages
+ *
+ * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2017, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TB_MSGS
+#define _TB_MSGS
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+enum tb_cfg_space {
+	TB_CFG_HOPS = 0,
+	TB_CFG_PORT = 1,
+	TB_CFG_SWITCH = 2,
+	TB_CFG_COUNTERS = 3,
+};
+
+enum tb_cfg_error {
+	TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+	TB_CFG_ERROR_LINK_ERROR = 1,
+	TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+	TB_CFG_ERROR_NO_SUCH_PORT = 4,
+	TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
+	TB_CFG_ERROR_LOOP = 8,
+	TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
+	TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+};
+
+/* common header */
+struct tb_cfg_header {
+	u32 route_hi:22;
+	u32 unknown:10; /* highest order bit is set on replies */
+	u32 route_lo;
+} __packed;
+
+/* additional header for read/write packets */
+struct tb_cfg_address {
+	u32 offset:13; /* in dwords */
+	u32 length:6; /* in dwords */
+	u32 port:6;
+	enum tb_cfg_space space:2;
+	u32 seq:2; /* sequence number  */
+	u32 zero:3;
+} __packed;
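+
+/*
+ * For illustration (assuming the control channel fills this header
+ * directly from the read/write parameters): the five dword read of the
+ * switch header in tb_switch_alloc(), tb_cfg_read(..., 0, TB_CFG_SWITCH,
+ * 0, 5), corresponds to space = TB_CFG_SWITCH, port = 0 (the switch
+ * itself), offset = 0 and length = 5 here.
+ */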
+
+/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
+struct cfg_read_pkg {
+	struct tb_cfg_header header;
+	struct tb_cfg_address addr;
+} __packed;
+
+/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
+struct cfg_write_pkg {
+	struct tb_cfg_header header;
+	struct tb_cfg_address addr;
+	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* TB_CFG_PKG_ERROR */
+struct cfg_error_pkg {
+	struct tb_cfg_header header;
+	enum tb_cfg_error error:4;
+	u32 zero1:4;
+	u32 port:6;
+	u32 zero2:2; /* Both should be zero, still they are different fields. */
+	u32 zero3:16;
+} __packed;
+
+/* TB_CFG_PKG_EVENT */
+struct cfg_event_pkg {
+	struct tb_cfg_header header;
+	u32 port:6;
+	u32 zero:25;
+	bool unplug:1;
+} __packed;
+
+/* TB_CFG_PKG_RESET */
+struct cfg_reset_pkg {
+	struct tb_cfg_header header;
+} __packed;
+
+/* TB_CFG_PKG_PREPARE_TO_SLEEP */
+struct cfg_pts_pkg {
+	struct tb_cfg_header header;
+	u32 data;
+} __packed;
+
+/* ICM messages */
+
+enum icm_pkg_code {
+	ICM_GET_TOPOLOGY = 0x1,
+	ICM_DRIVER_READY = 0x3,
+	ICM_APPROVE_DEVICE = 0x4,
+	ICM_CHALLENGE_DEVICE = 0x5,
+	ICM_ADD_DEVICE_KEY = 0x6,
+	ICM_GET_ROUTE = 0xa,
+	ICM_APPROVE_XDOMAIN = 0x10,
+	ICM_DISCONNECT_XDOMAIN = 0x11,
+	ICM_PREBOOT_ACL = 0x18,
+};
+
+enum icm_event_code {
+	ICM_EVENT_DEVICE_CONNECTED = 3,
+	ICM_EVENT_DEVICE_DISCONNECTED = 4,
+	ICM_EVENT_XDOMAIN_CONNECTED = 6,
+	ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
+};
+
+struct icm_pkg_header {
+	u8 code;
+	u8 flags;
+	u8 packet_id;
+	u8 total_packets;
+};
+
+#define ICM_FLAGS_ERROR			BIT(0)
+#define ICM_FLAGS_NO_KEY		BIT(1)
+#define ICM_FLAGS_SLEVEL_SHIFT		3
+#define ICM_FLAGS_SLEVEL_MASK		GENMASK(4, 3)
+#define ICM_FLAGS_WRITE			BIT(7)
+
+struct icm_pkg_driver_ready {
+	struct icm_pkg_header hdr;
+};
+
+/* Falcon Ridge only messages */
+
+struct icm_fr_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u8 romver;
+	u8 ramver;
+	u16 security_level;
+};
+
+#define ICM_FR_SLEVEL_MASK		0xf
+
+/* Falcon Ridge & Alpine Ridge common messages */
+
+struct icm_fr_pkg_get_topology {
+	struct icm_pkg_header hdr;
+};
+
+#define ICM_GET_TOPOLOGY_PACKETS	14
+
+struct icm_fr_pkg_get_topology_response {
+	struct icm_pkg_header hdr;
+	u32 route_lo;
+	u32 route_hi;
+	u8 first_data;
+	u8 second_data;
+	u8 drom_i2c_address_index;
+	u8 switch_index;
+	u32 reserved[2];
+	u32 ports[16];
+	u32 port_hop_info[16];
+};
+
+#define ICM_SWITCH_USED			BIT(0)
+#define ICM_SWITCH_UPSTREAM_PORT_MASK	GENMASK(7, 1)
+#define ICM_SWITCH_UPSTREAM_PORT_SHIFT	1
+
+#define ICM_PORT_TYPE_MASK		GENMASK(23, 0)
+#define ICM_PORT_INDEX_SHIFT		24
+#define ICM_PORT_INDEX_MASK		GENMASK(31, 24)
+
+struct icm_fr_event_device_connected {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 link_info;
+	u32 ep_name[55];
+};
+
+#define ICM_LINK_INFO_LINK_MASK		0x7
+#define ICM_LINK_INFO_DEPTH_SHIFT	4
+#define ICM_LINK_INFO_DEPTH_MASK	GENMASK(7, 4)
+#define ICM_LINK_INFO_APPROVED		BIT(8)
+#define ICM_LINK_INFO_REJECTED		BIT(9)
+#define ICM_LINK_INFO_BOOT		BIT(10)
+
+struct icm_fr_pkg_approve_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+};
+
+struct icm_fr_event_device_disconnected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+};
+
+struct icm_fr_event_xdomain_connected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	uuid_t local_uuid;
+	u32 local_route_hi;
+	u32 local_route_lo;
+	u32 remote_route_hi;
+	u32 remote_route_lo;
+};
+
+struct icm_fr_event_xdomain_disconnected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+};
+
+struct icm_fr_pkg_add_device_key {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 key[8];
+};
+
+struct icm_fr_pkg_add_device_key_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+};
+
+struct icm_fr_pkg_challenge_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+};
+
+struct icm_fr_pkg_challenge_device_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+	u32 response[8];
+};
+
+struct icm_fr_pkg_approve_xdomain {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_fr_pkg_approve_xdomain_response {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+/* Alpine Ridge only messages */
+
+struct icm_ar_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u8 romver;
+	u8 ramver;
+	u16 info;
+};
+
+#define ICM_AR_FLAGS_RTD3		BIT(6)
+
+#define ICM_AR_INFO_SLEVEL_MASK		GENMASK(3, 0)
+#define ICM_AR_INFO_BOOT_ACL_SHIFT	7
+#define ICM_AR_INFO_BOOT_ACL_MASK	GENMASK(11, 7)
+#define ICM_AR_INFO_BOOT_ACL_SUPPORTED	BIT(13)
+
+struct icm_ar_pkg_get_route {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+};
+
+struct icm_ar_pkg_get_route_response {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	u32 route_hi;
+	u32 route_lo;
+};
+
+struct icm_ar_boot_acl_entry {
+	u32 uuid_lo;
+	u32 uuid_hi;
+};
+
+#define ICM_AR_PREBOOT_ACL_ENTRIES	16
+
+struct icm_ar_pkg_preboot_acl {
+	struct icm_pkg_header hdr;
+	struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+struct icm_ar_pkg_preboot_acl_response {
+	struct icm_pkg_header hdr;
+	struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+/* Titan Ridge messages */
+
+struct icm_tr_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u16 reserved1;
+	u16 info;
+	u32 nvm_version;
+	u16 device_id;
+	u16 reserved2;
+};
+
+#define ICM_TR_FLAGS_RTD3		BIT(6)
+
+#define ICM_TR_INFO_SLEVEL_MASK		GENMASK(2, 0)
+#define ICM_TR_INFO_BOOT_ACL_SHIFT	7
+#define ICM_TR_INFO_BOOT_ACL_MASK	GENMASK(12, 7)
+
+struct icm_tr_event_device_connected {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved;
+	u16 link_info;
+	u32 ep_name[55];
+};
+
+struct icm_tr_event_device_disconnected {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+};
+
+struct icm_tr_event_xdomain_connected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	uuid_t local_uuid;
+	u32 local_route_hi;
+	u32 local_route_lo;
+	u32 remote_route_hi;
+	u32 remote_route_lo;
+};
+
+struct icm_tr_event_xdomain_disconnected {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_approve_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved1[3];
+};
+
+struct icm_tr_pkg_add_device_key {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 key[8];
+};
+
+struct icm_tr_pkg_challenge_device {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 challenge[8];
+};
+
+struct icm_tr_pkg_approve_xdomain {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain {
+	struct icm_pkg_header hdr;
+	u8 stage;
+	u8 reserved[3];
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_challenge_device_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+	u32 challenge[8];
+	u32 response[8];
+};
+
+struct icm_tr_pkg_add_device_key_response {
+	struct icm_pkg_header hdr;
+	uuid_t ep_uuid;
+	u32 route_hi;
+	u32 route_lo;
+	u8 connection_id;
+	u8 reserved[3];
+};
+
+struct icm_tr_pkg_approve_xdomain_response {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain_response {
+	struct icm_pkg_header hdr;
+	u8 stage;
+	u8 reserved[3];
+	u32 route_hi;
+	u32 route_lo;
+	uuid_t remote_uuid;
+};
+
+/* XDomain messages */
+
+struct tb_xdomain_header {
+	u32 route_hi;
+	u32 route_lo;
+	u32 length_sn;
+};
+
+#define TB_XDOMAIN_LENGTH_MASK	GENMASK(5, 0)
+#define TB_XDOMAIN_SN_MASK	GENMASK(28, 27)
+#define TB_XDOMAIN_SN_SHIFT	27
+
+enum tb_xdp_type {
+	UUID_REQUEST_OLD = 1,
+	UUID_RESPONSE = 2,
+	PROPERTIES_REQUEST,
+	PROPERTIES_RESPONSE,
+	PROPERTIES_CHANGED_REQUEST,
+	PROPERTIES_CHANGED_RESPONSE,
+	ERROR_RESPONSE,
+	UUID_REQUEST = 12,
+};
+
+struct tb_xdp_header {
+	struct tb_xdomain_header xd_hdr;
+	uuid_t uuid;
+	u32 type;
+};
+
+struct tb_xdp_properties {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+	uuid_t dst_uuid;
+	u16 offset;
+	u16 reserved;
+};
+
+struct tb_xdp_properties_response {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+	uuid_t dst_uuid;
+	u16 offset;
+	u16 data_length;
+	u32 generation;
+	u32 data[0];
+};
+
+/*
+ * Maximum length of the data array a single XDomain property response
+ * is allowed to carry.
+ */
+#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH	\
+	(((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4)
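+
+/*
+ * Illustrative arithmetic, assuming no compiler padding in the response
+ * structure: sizeof(struct tb_xdp_properties_response) is 72 bytes, so
+ * this evaluates to (256 - 4 - 72) / 4 = 45 dwords of property data per
+ * response.
+ */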
+
+/* Maximum size of the total property block in dwords we allow */
+#define TB_XDP_PROPERTIES_MAX_LENGTH		500
+
+struct tb_xdp_properties_changed {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+};
+
+struct tb_xdp_properties_changed_response {
+	struct tb_xdp_header hdr;
+};
+
+enum tb_xdp_error {
+	ERROR_SUCCESS,
+	ERROR_UNKNOWN_PACKET,
+	ERROR_UNKNOWN_DOMAIN,
+	ERROR_NOT_SUPPORTED,
+	ERROR_NOT_READY,
+};
+
+struct tb_xdp_error_response {
+	struct tb_xdp_header hdr;
+	u32 error;
+};
+
+#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
new file mode 100644
index 0000000..693b035
--- /dev/null
+++ b/drivers/thunderbolt/tb_regs.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
+ *
+ * Every thunderbolt device consists (logically) of a switch with multiple
+ * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
+ * COUNTERS) which are used to configure the device.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_REGS
+#define _TB_REGS
+
+#include <linux/types.h>
+
+
+#define TB_ROUTE_SHIFT 8  /* number of bits in a port entry of a route */
+
+
+/*
+ * TODO: should be 63? But we do not know how to receive frames larger than 256
+ * bytes at the frame level. (header + checksum = 16, 60*4 = 240)
+ */
+#define TB_MAX_CONFIG_RW_LENGTH 60
+
+enum tb_switch_cap {
+	TB_SWITCH_CAP_VSE		= 0x05,
+};
+
+enum tb_switch_vse_cap {
+	TB_VSE_CAP_PLUG_EVENTS		= 0x01, /* also EEPROM */
+	TB_VSE_CAP_TIME2		= 0x03,
+	TB_VSE_CAP_IECS			= 0x04,
+	TB_VSE_CAP_LINK_CONTROLLER	= 0x06, /* also IECS */
+};
+
+enum tb_port_cap {
+	TB_PORT_CAP_PHY			= 0x01,
+	TB_PORT_CAP_TIME1		= 0x03,
+	TB_PORT_CAP_ADAP		= 0x04,
+	TB_PORT_CAP_VSE			= 0x05,
+};
+
+enum tb_port_state {
+	TB_PORT_DISABLED	= 0, /* tb_cap_phy.disable == 1 */
+	TB_PORT_CONNECTING	= 1, /* retry */
+	TB_PORT_UP		= 2,
+	TB_PORT_UNPLUGGED	= 7,
+};
+
+/* capability headers */
+
+struct tb_cap_basic {
+	u8 next;
+	/* enum tb_cap cap:8; prevent "narrower than values of its type" */
+	u8 cap; /* if cap == 0x05 then we have an extended capability */
+} __packed;
+
+/**
+ * struct tb_cap_extended_short - Switch extended short capability
+ * @next: Pointer to the next capability. If @next and @length are zero
+ *	  then we have a long cap.
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @length: Length of this capability
+ */
+struct tb_cap_extended_short {
+	u8 next;
+	u8 cap;
+	u8 vsec_id;
+	u8 length;
+} __packed;
+
+/**
+ * struct tb_cap_extended_long - Switch extended long capability
+ * @zero1: This field should be zero
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @zero2: This field should be zero
+ * @next: Pointer to the next capability
+ * @length: Length of this capability
+ */
+struct tb_cap_extended_long {
+	u8 zero1;
+	u8 cap;
+	u8 vsec_id;
+	u8 zero2;
+	u16 next;
+	u16 length;
+} __packed;
+
+/* capabilities */
+
+struct tb_cap_link_controller {
+	struct tb_cap_extended_long cap_header;
+	u32 count:4; /* number of link controllers */
+	u32 unknown1:4;
+	u32 base_offset:8; /*
+			    * offset (into this capability) of the configuration
+			    * area of the first link controller
+			    */
+	u32 length:12; /* link controller configuration area length */
+	u32 unknown2:4; /* TODO check that length is correct */
+} __packed;
+
+struct tb_cap_phy {
+	struct tb_cap_basic cap_header;
+	u32 unknown1:16;
+	u32 unknown2:14;
+	bool disable:1;
+	u32 unknown3:11;
+	enum tb_port_state state:4;
+	u32 unknown4:2;
+} __packed;
+
+struct tb_eeprom_ctl {
+	bool clock:1; /* send pulse to transfer one bit */
+	bool access_low:1; /* set to 0 before access */
+	bool data_out:1; /* to eeprom */
+	bool data_in:1; /* from eeprom */
+	bool access_high:1; /* set to 1 before access */
+	bool not_present:1; /* should be 0 */
+	bool unknown1:1;
+	bool present:1; /* should be 1 */
+	u32 unknown2:24;
+} __packed;
+
+struct tb_cap_plug_events {
+	struct tb_cap_extended_short cap_header;
+	u32 __unknown1:2;
+	u32 plug_events:5;
+	u32 __unknown2:25;
+	u32 __unknown3;
+	u32 __unknown4;
+	struct tb_eeprom_ctl eeprom_ctl;
+	u32 __unknown5[7];
+	u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */
+} __packed;
+
+/* device headers */
+
+/* Present on port 0 in TB_CFG_SWITCH at address zero. */
+struct tb_regs_switch_header {
+	/* DWORD 0 */
+	u16 vendor_id;
+	u16 device_id;
+	/* DWORD 1 */
+	u32 first_cap_offset:8;
+	u32 upstream_port_number:6;
+	u32 max_port_number:6;
+	u32 depth:3;
+	u32 __unknown1:1;
+	u32 revision:8;
+	/* DWORD 2 */
+	u32 route_lo;
+	/* DWORD 3 */
+	u32 route_hi:31;
+	bool enabled:1;
+	/* DWORD 4 */
+	u32 plug_events_delay:8; /*
+				  * RW, pause between plug events in
+				  * milliseconds. Writing 0x00 is interpreted
+				  * as 255ms.
+				  */
+	u32 __unknown4:16;
+	u32 thunderbolt_version:8;
+} __packed;
+
+enum tb_port_type {
+	TB_TYPE_INACTIVE	= 0x000000,
+	TB_TYPE_PORT		= 0x000001,
+	TB_TYPE_NHI		= 0x000002,
+	/* TB_TYPE_ETHERNET	= 0x020000, lower order bits are not known */
+	/* TB_TYPE_SATA		= 0x080000, lower order bits are not known */
+	TB_TYPE_DP_HDMI_IN	= 0x0e0101,
+	TB_TYPE_DP_HDMI_OUT	= 0x0e0102,
+	TB_TYPE_PCIE_DOWN	= 0x100101,
+	TB_TYPE_PCIE_UP		= 0x100102,
+	/* TB_TYPE_USB		= 0x200000, lower order bits are not known */
+};
+
+/* Present on every port in TB_CFG_PORT at address zero. */
+struct tb_regs_port_header {
+	/* DWORD 0 */
+	u16 vendor_id;
+	u16 device_id;
+	/* DWORD 1 */
+	u32 first_cap_offset:8;
+	u32 max_counters:11;
+	u32 __unknown1:5;
+	u32 revision:8;
+	/* DWORD 2 */
+	enum tb_port_type type:24;
+	u32 thunderbolt_version:8;
+	/* DWORD 3 */
+	u32 __unknown2:20;
+	u32 port_number:6;
+	u32 __unknown3:6;
+	/* DWORD 4 */
+	u32 nfc_credits;
+	/* DWORD 5 */
+	u32 max_in_hop_id:11;
+	u32 max_out_hop_id:11;
+	u32 __unknown4:10;
+	/* DWORD 6 */
+	u32 __unknown5;
+	/* DWORD 7 */
+	u32 __unknown6;
+
+} __packed;
+
+/* Hop register from TB_CFG_HOPS. 8 bytes per entry. */
+struct tb_regs_hop {
+	/* DWORD 0 */
+	u32 next_hop:11; /*
+			  * hop to take after sending the packet through
+			  * out_port (on the incoming port of the next switch)
+			  */
+	u32 out_port:6; /* next port of the path (on the same switch) */
+	u32 initial_credits:8;
+	u32 unknown1:6; /* set to zero */
+	bool enable:1;
+
+	/* DWORD 1 */
+	u32 weight:4;
+	u32 unknown2:4; /* set to zero */
+	u32 priority:3;
+	bool drop_packages:1;
+	u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
+	bool counter_enable:1;
+	bool ingress_fc:1;
+	bool egress_fc:1;
+	bool ingress_shared_buffer:1;
+	bool egress_shared_buffer:1;
+	u32 unknown3:4; /* set to zero */
+} __packed;
+
+
+#endif
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
new file mode 100644
index 0000000..0637537
--- /dev/null
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Cactus Ridge driver - PCIe tunnel
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tunnel_pci.h"
+#include "tb.h"
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
+	do {                                                            \
+		struct tb_pci_tunnel *__tunnel = (tunnel);              \
+		level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt,  \
+		      tb_route(__tunnel->down_port->sw),                \
+		      __tunnel->down_port->port,                        \
+		      tb_route(__tunnel->up_port->sw),                  \
+		      __tunnel->up_port->port,                          \
+		      ## arg);                                          \
+	} while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 3;
+	path->weight = 1;
+	path->drop_packages = 0;
+	path->nfc_credits = 0;
+}
+
+/**
+ * tb_pci_alloc() - allocate a pci tunnel
+ *
+ * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Currently only paths consisting of two hops are supported (that is, the
+ * ports must be on "adjacent" switches).
+ *
+ * The paths are hard-coded to use hop 8 (the only working hop id available on
+ * my thunderbolt devices). Therefore at most ONE path per device may be
+ * activated.
+ *
+ * Return: Returns a tb_pci_tunnel on success or NULL on failure.
+ */
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+				   struct tb_port *down)
+{
+	struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+	if (!tunnel)
+		goto err;
+	tunnel->tb = tb;
+	tunnel->down_port = down;
+	tunnel->up_port = up;
+	INIT_LIST_HEAD(&tunnel->list);
+	tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
+	if (!tunnel->path_to_up)
+		goto err;
+	tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
+	if (!tunnel->path_to_down)
+		goto err;
+	tb_pci_init_path(tunnel->path_to_up);
+	tb_pci_init_path(tunnel->path_to_down);
+
+	tunnel->path_to_up->hops[0].in_port = down;
+	tunnel->path_to_up->hops[0].in_hop_index = 8;
+	tunnel->path_to_up->hops[0].in_counter_index = -1;
+	tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
+	tunnel->path_to_up->hops[0].next_hop_index = 8;
+
+	tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
+	tunnel->path_to_up->hops[1].in_hop_index = 8;
+	tunnel->path_to_up->hops[1].in_counter_index = -1;
+	tunnel->path_to_up->hops[1].out_port = up;
+	tunnel->path_to_up->hops[1].next_hop_index = 8;
+
+	tunnel->path_to_down->hops[0].in_port = up;
+	tunnel->path_to_down->hops[0].in_hop_index = 8;
+	tunnel->path_to_down->hops[0].in_counter_index = -1;
+	tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
+	tunnel->path_to_down->hops[0].next_hop_index = 8;
+
+	tunnel->path_to_down->hops[1].in_port =
+		tb_upstream_port(up->sw)->remote;
+	tunnel->path_to_down->hops[1].in_hop_index = 8;
+	tunnel->path_to_down->hops[1].in_counter_index = -1;
+	tunnel->path_to_down->hops[1].out_port = down;
+	tunnel->path_to_down->hops[1].next_hop_index = 8;
+	return tunnel;
+
+err:
+	if (tunnel) {
+		if (tunnel->path_to_down)
+			tb_path_free(tunnel->path_to_down);
+		if (tunnel->path_to_up)
+			tb_path_free(tunnel->path_to_up);
+		kfree(tunnel);
+	}
+	return NULL;
+}
+
+/**
+ * tb_pci_free() - free a tunnel
+ *
+ * The tunnel must have been deactivated.
+ */
+void tb_pci_free(struct tb_pci_tunnel *tunnel)
+{
+	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+		tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
+		return;
+	}
+	tb_path_free(tunnel->path_to_up);
+	tb_path_free(tunnel->path_to_down);
+	kfree(tunnel);
+}
+
+/**
+ * tb_pci_is_invalid() - check whether an activated path is still valid
+ */
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
+{
+	WARN_ON(!tunnel->path_to_up->activated);
+	WARN_ON(!tunnel->path_to_down->activated);
+
+	return tb_path_is_invalid(tunnel->path_to_up)
+	       || tb_path_is_invalid(tunnel->path_to_down);
+}
+
+/**
+ * tb_pci_port_active() - activate/deactivate PCI capability
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_pci_port_active(struct tb_port *port, bool active)
+{
+	u32 word = active ? 0x80000000 : 0x0;
+	int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+	if (cap < 0) {
+		tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
+		return cap;
+	}
+	return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
+}
+
+/**
+ * tb_pci_restart() - activate a tunnel after a hardware reset
+ */
+int tb_pci_restart(struct tb_pci_tunnel *tunnel)
+{
+	int res;
+	tunnel->path_to_up->activated = false;
+	tunnel->path_to_down->activated = false;
+
+	tb_tunnel_info(tunnel, "activating\n");
+
+	res = tb_path_activate(tunnel->path_to_up);
+	if (res)
+		goto err;
+	res = tb_path_activate(tunnel->path_to_down);
+	if (res)
+		goto err;
+
+	res = tb_pci_port_active(tunnel->down_port, true);
+	if (res)
+		goto err;
+
+	res = tb_pci_port_active(tunnel->up_port, true);
+	if (res)
+		goto err;
+	return 0;
+err:
+	tb_tunnel_warn(tunnel, "activation failed\n");
+	tb_pci_deactivate(tunnel);
+	return res;
+}
+
+/**
+ * tb_pci_activate() - activate a tunnel
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_pci_activate(struct tb_pci_tunnel *tunnel)
+{
+	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+		tb_tunnel_WARN(tunnel,
+			       "trying to activate an already activated tunnel\n");
+		return -EINVAL;
+	}
+
+	return tb_pci_restart(tunnel);
+}
+
+
+
+/**
+ * tb_pci_deactivate() - deactivate a tunnel
+ */
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
+{
+	tb_tunnel_info(tunnel, "deactivating\n");
+	/*
+	 * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
+	 * port. Seems to have no effect?
+	 */
+	tb_pci_port_active(tunnel->up_port, false);
+	tb_pci_port_active(tunnel->down_port, false);
+	if (tunnel->path_to_down->activated)
+		tb_path_deactivate(tunnel->path_to_down);
+	if (tunnel->path_to_up->activated)
+		tb_path_deactivate(tunnel->path_to_up);
+}
+
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
new file mode 100644
index 0000000..f9b65fa
--- /dev/null
+++ b/drivers/thunderbolt/tunnel_pci.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt Cactus Ridge driver - PCIe tunnel
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_PCI_H_
+#define TB_PCI_H_
+
+#include "tb.h"
+
+struct tb_pci_tunnel {
+	struct tb *tb;
+	struct tb_port *up_port;
+	struct tb_port *down_port;
+	struct tb_path *path_to_up;
+	struct tb_path *path_to_down;
+	struct list_head list;
+};
+
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+				   struct tb_port *down);
+void tb_pci_free(struct tb_pci_tunnel *tunnel);
+int tb_pci_activate(struct tb_pci_tunnel *tunnel);
+int tb_pci_restart(struct tb_pci_tunnel *tunnel);
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
+
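+/*
+ * Illustrative sketch only (not part of this header): a minimal example of
+ * how a software connection manager might drive the PCIe tunnel API declared
+ * above. The function name is hypothetical; the tb_pci_*() calls are the
+ * real interface.
+ */
+#if 0
+static int example_tunnel_pcie(struct tb *tb, struct tb_port *up,
+			       struct tb_port *down)
+{
+	struct tb_pci_tunnel *tunnel;
+
+	/* Allocate the two paths between the PCIe adapter ports */
+	tunnel = tb_pci_alloc(tb, up, down);
+	if (!tunnel)
+		return -ENOMEM;
+
+	/* Program the hop entries and enable the PCIe adapters */
+	if (tb_pci_activate(tunnel)) {
+		tb_pci_free(tunnel);
+		return -EIO;
+	}
+
+	/* Teardown later: tb_pci_deactivate(tunnel); tb_pci_free(tunnel); */
+	return 0;
+}
+#endif
+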
+#endif
+
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
new file mode 100644
index 0000000..db8bece
--- /dev/null
+++ b/drivers/thunderbolt/xdomain.c
@@ -0,0 +1,1611 @@
+/*
+ * Thunderbolt XDomain discovery protocol support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include "tb.h"
+
+#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
+#define XDOMAIN_PROPERTIES_RETRIES		60
+#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
+
+struct xdomain_request_work {
+	struct work_struct work;
+	struct tb_xdp_header *pkg;
+	struct tb *tb;
+};
+
+/* Serializes access to the properties and protocol handlers below */
+static DEFINE_MUTEX(xdomain_lock);
+
+/* Properties exposed to the remote domains */
+static struct tb_property_dir *xdomain_property_dir;
+static u32 *xdomain_property_block;
+static u32 xdomain_property_block_len;
+static u32 xdomain_property_block_gen;
+
+/* Additional protocol handlers */
+static LIST_HEAD(protocol_handlers);
+
+/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
+static const uuid_t tb_xdp_uuid =
+	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
+		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+
+static bool tb_xdomain_match(const struct tb_cfg_request *req,
+			     const struct ctl_pkg *pkg)
+{
+	switch (pkg->frame.eof) {
+	case TB_CFG_PKG_ERROR:
+		return true;
+
+	case TB_CFG_PKG_XDOMAIN_RESP: {
+		const struct tb_xdp_header *res_hdr = pkg->buffer;
+		const struct tb_xdp_header *req_hdr = req->request;
+
+		if (pkg->frame.size < req->response_size / 4)
+			return false;
+
+		/* Make sure route matches */
+		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
+		     req_hdr->xd_hdr.route_hi)
+			return false;
+		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
+			return false;
+
+		/* Check that the XDomain protocol matches */
+		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
+			return false;
+
+		return true;
+	}
+
+	default:
+		return false;
+	}
+}
+
+static bool tb_xdomain_copy(struct tb_cfg_request *req,
+			    const struct ctl_pkg *pkg)
+{
+	memcpy(req->response, pkg->buffer, req->response_size);
+	req->result.err = 0;
+	return true;
+}
+
+static void response_ready(void *data)
+{
+	tb_cfg_request_put(data);
+}
+
+static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
+				 size_t size, enum tb_cfg_pkg_type type)
+{
+	struct tb_cfg_request *req;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = tb_xdomain_match;
+	req->copy = tb_xdomain_copy;
+	req->request = response;
+	req->request_size = size;
+	req->request_type = type;
+
+	return tb_cfg_request(ctl, req, response_ready, req);
+}
+
+/**
+ * tb_xdomain_response() - Send an XDomain response message
+ * @xd: XDomain to send the message
+ * @response: Response to send
+ * @size: Size of the response
+ * @type: PDF type of the response
+ *
+ * This can be used to send an XDomain response message to the other
+ * domain. No response for the message is expected.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
+			size_t size, enum tb_cfg_pkg_type type)
+{
+	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_response);
+
+static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
+	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
+	size_t response_size, enum tb_cfg_pkg_type response_type,
+	unsigned int timeout_msec)
+{
+	struct tb_cfg_request *req;
+	struct tb_cfg_result res;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = tb_xdomain_match;
+	req->copy = tb_xdomain_copy;
+	req->request = request;
+	req->request_size = request_size;
+	req->request_type = request_type;
+	req->response = response;
+	req->response_size = response_size;
+	req->response_type = response_type;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res.err == 1 ? -EIO : res.err;
+}
+
+/**
+ * tb_xdomain_request() - Send an XDomain request
+ * @xd: XDomain to send the request
+ * @request: Request to send
+ * @request_size: Size of the request in bytes
+ * @request_type: PDF type of the request
+ * @response: Response is copied here
+ * @response_size: Expected size of the response in bytes
+ * @response_type: Expected PDF type of the response
+ * @timeout_msec: Timeout in milliseconds to wait for the response
+ *
+ * This function can be used to send XDomain control channel messages to
+ * the other domain. The function waits until the response is received
+ * or the timeout triggers, whichever comes first.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
+	size_t request_size, enum tb_cfg_pkg_type request_type,
+	void *response, size_t response_size,
+	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
+{
+	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
+				    request_type, response, response_size,
+				    response_type, timeout_msec);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_request);
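+
+/*
+ * Illustrative sketch only (not compiled here): how a service driver might
+ * use tb_xdomain_request() with its own protocol packets. struct
+ * example_req/example_resp, example_send_command() and the command layout
+ * are hypothetical; the PDF types and the call itself are the real API.
+ */
+#if 0
+struct example_req {
+	struct tb_xdomain_header xd_hdr;
+	u32 command;
+} __packed;
+
+struct example_resp {
+	struct tb_xdomain_header xd_hdr;
+	u32 status;
+} __packed;
+
+static int example_send_command(struct tb_xdomain *xd, u32 command)
+{
+	struct example_req req = { .command = command };
+	struct example_resp resp = { };
+
+	/*
+	 * The route and length/sequence fields of xd_hdr would be filled
+	 * in here as required by the service's own protocol.
+	 */
+	return tb_xdomain_request(xd, &req, sizeof(req),
+				  TB_CFG_PKG_XDOMAIN_REQ,
+				  &resp, sizeof(resp),
+				  TB_CFG_PKG_XDOMAIN_RESP, 1000);
+}
+#endif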
+
+static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
+	u8 sequence, enum tb_xdp_type type, size_t size)
+{
+	u32 length_sn;
+
+	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
+	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
+
+	hdr->xd_hdr.route_hi = upper_32_bits(route);
+	hdr->xd_hdr.route_lo = lower_32_bits(route);
+	hdr->xd_hdr.length_sn = length_sn;
+	hdr->type = type;
+	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
+}
+
+static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+{
+	const struct tb_xdp_error_response *error;
+
+	if (hdr->type != ERROR_RESPONSE)
+		return 0;
+
+	error = (const struct tb_xdp_error_response *)hdr;
+
+	switch (error->error) {
+	case ERROR_UNKNOWN_PACKET:
+	case ERROR_UNKNOWN_DOMAIN:
+		return -EIO;
+	case ERROR_NOT_SUPPORTED:
+		return -ENOTSUPP;
+	case ERROR_NOT_READY:
+		return -EAGAIN;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+				 enum tb_xdp_error error)
+{
+	struct tb_xdp_error_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
+			   sizeof(res));
+	res.error = error;
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
+	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
+	u32 **block, u32 *generation)
+{
+	struct tb_xdp_properties_response *res;
+	struct tb_xdp_properties req;
+	u16 data_len, len;
+	size_t total_size;
+	u32 *data = NULL;
+	int ret;
+
+	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
+	res = kzalloc(total_size, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
+			   sizeof(req));
+	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
+	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
+
+	len = 0;
+	data_len = 0;
+
+	do {
+		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+					   TB_CFG_PKG_XDOMAIN_REQ, res,
+					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
+					   XDOMAIN_DEFAULT_TIMEOUT);
+		if (ret)
+			goto err;
+
+		ret = tb_xdp_handle_error(&res->hdr);
+		if (ret)
+			goto err;
+
+		/*
+		 * The packet length includes the whole payload without the
+		 * XDomain header. Validate first that the packet is at
+		 * least the size of the response structure.
+		 */
+		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+		if (len < sizeof(*res) / 4) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		len += sizeof(res->hdr.xd_hdr) / 4;
+		len -= sizeof(*res) / 4;
+
+		if (res->offset != req.offset) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/*
+		 * First time allocate block that has enough space for
+		 * the whole properties block.
+		 */
+		if (!data) {
+			data_len = res->data_length;
+			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
+				ret = -E2BIG;
+				goto err;
+			}
+
+			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
+			if (!data) {
+				ret = -ENOMEM;
+				goto err;
+			}
+		}
+
+		memcpy(data + req.offset, res->data, len * 4);
+		req.offset += len;
+	} while (!data_len || req.offset < data_len);
+
+	*block = data;
+	*generation = res->generation;
+
+	kfree(res);
+
+	return data_len;
+
+err:
+	kfree(data);
+	kfree(res);
+
+	return ret;
+}
+
+static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
+	u64 route, u8 sequence, const uuid_t *src_uuid,
+	const struct tb_xdp_properties *req)
+{
+	struct tb_xdp_properties_response *res;
+	size_t total_size;
+	u16 len;
+	int ret;
+
+	/*
+	 * Currently we expect all requests to be directed to us. The
+	 * protocol supports forwarding, though, which we might add
+	 * support for later on.
+	 */
+	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
+		tb_xdp_error_response(ctl, route, sequence,
+				      ERROR_UNKNOWN_DOMAIN);
+		return 0;
+	}
+
+	mutex_lock(&xdomain_lock);
+
+	if (req->offset >= xdomain_property_block_len) {
+		mutex_unlock(&xdomain_lock);
+		return -EINVAL;
+	}
+
+	len = xdomain_property_block_len - req->offset;
+	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
+	total_size = sizeof(*res) + len * 4;
+
+	res = kzalloc(total_size, GFP_KERNEL);
+	if (!res) {
+		mutex_unlock(&xdomain_lock);
+		return -ENOMEM;
+	}
+
+	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+			   total_size);
+	res->generation = xdomain_property_block_gen;
+	res->data_length = xdomain_property_block_len;
+	res->offset = req->offset;
+	uuid_copy(&res->src_uuid, src_uuid);
+	uuid_copy(&res->dst_uuid, &req->src_uuid);
+	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+
+	mutex_unlock(&xdomain_lock);
+
+	ret = __tb_xdomain_response(ctl, res, total_size,
+				    TB_CFG_PKG_XDOMAIN_RESP);
+
+	kfree(res);
+	return ret;
+}
+
+static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
+					     int retry, const uuid_t *uuid)
+{
+	struct tb_xdp_properties_changed_response res;
+	struct tb_xdp_properties_changed req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4,
+			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
+	uuid_copy(&req.src_uuid, uuid);
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+				   TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return tb_xdp_handle_error(&res.hdr);
+}
+
+static int
+tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
+{
+	struct tb_xdp_properties_changed_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence,
+			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+/**
+ * tb_register_protocol_handler() - Register protocol handler
+ * @handler: Handler to register
+ *
+ * This allows XDomain service drivers to hook into incoming XDomain
+ * messages. After this function is called, the service driver needs to
+ * be able to handle calls to its callback whenever a packet with the
+ * registered protocol UUID is received.
+ */
+int tb_register_protocol_handler(struct tb_protocol_handler *handler)
+{
+	if (!handler->uuid || !handler->callback)
+		return -EINVAL;
+	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
+		return -EINVAL;
+
+	mutex_lock(&xdomain_lock);
+	list_add_tail(&handler->list, &protocol_handlers);
+	mutex_unlock(&xdomain_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
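+
+/*
+ * Illustrative sketch only (not compiled here): how a service driver might
+ * hook its own XDomain protocol. The UUID value and example_cb() are
+ * hypothetical; struct tb_protocol_handler and the register call are the
+ * real API.
+ */
+#if 0
+static const uuid_t example_proto_uuid =
+	UUID_INIT(0x12345678, 0x0000, 0x0000,
+		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01);
+
+static int example_cb(const void *buf, size_t size, void *data)
+{
+	/* Returning a positive value marks the packet as consumed */
+	return 1;
+}
+
+static struct tb_protocol_handler example_handler = {
+	.uuid = &example_proto_uuid,
+	.callback = example_cb,
+};
+
+/* In the service driver's init path: */
+/* tb_register_protocol_handler(&example_handler); */
+#endif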
+
+/**
+ * tb_unregister_protocol_handler() - Unregister protocol handler
+ * @handler: Handler to unregister
+ *
+ * Removes the previously registered protocol handler.
+ */
+void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
+{
+	mutex_lock(&xdomain_lock);
+	list_del_init(&handler->list);
+	mutex_unlock(&xdomain_lock);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
+
+static void tb_xdp_handle_request(struct work_struct *work)
+{
+	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+	const struct tb_xdp_header *pkg = xw->pkg;
+	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
+	struct tb *tb = xw->tb;
+	struct tb_ctl *ctl = tb->ctl;
+	const uuid_t *uuid;
+	int ret = 0;
+	u32 sequence;
+	u64 route;
+
+	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
+	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
+	sequence >>= TB_XDOMAIN_SN_SHIFT;
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch)
+		uuid = tb->root_switch->uuid;
+	else
+		uuid = NULL;
+	mutex_unlock(&tb->lock);
+
+	if (!uuid) {
+		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
+		goto out;
+	}
+
+	switch (pkg->type) {
+	case PROPERTIES_REQUEST:
+		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
+			(const struct tb_xdp_properties *)pkg);
+		break;
+
+	case PROPERTIES_CHANGED_REQUEST: {
+		const struct tb_xdp_properties_changed *xchg =
+			(const struct tb_xdp_properties_changed *)pkg;
+		struct tb_xdomain *xd;
+
+		ret = tb_xdp_properties_changed_response(ctl, route, sequence);
+
+		/*
+		 * Since the properties have been changed, let's update
+		 * the xdomain related to this connection as well in
+		 * case there is a change in services it offers.
+		 */
+		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+		if (xd) {
+			queue_delayed_work(tb->wq, &xd->get_properties_work,
+					   msecs_to_jiffies(50));
+			tb_xdomain_put(xd);
+		}
+
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	if (ret) {
+		tb_warn(tb, "failed to send XDomain response for %#x\n",
+			pkg->type);
+	}
+
+out:
+	kfree(xw->pkg);
+	kfree(xw);
+}
+
+static void
+tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
+			size_t size)
+{
+	struct xdomain_request_work *xw;
+
+	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
+	if (!xw)
+		return;
+
+	INIT_WORK(&xw->work, tb_xdp_handle_request);
+	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
+	if (!xw->pkg) {
+		kfree(xw);
+		return;
+	}
+	xw->tb = tb;
+
+	queue_work(tb->wq, &xw->work);
+}
+
+/**
+ * tb_register_service_driver() - Register XDomain service driver
+ * @drv: Driver to register
+ *
+ * Registers the new service driver @drv to the bus.
+ */
+int tb_register_service_driver(struct tb_service_driver *drv)
+{
+	drv->driver.bus = &tb_bus_type;
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_register_service_driver);
+
+/**
+ * tb_unregister_service_driver() - Unregister XDomain service driver
+ * @drv: Driver to unregister
+ *
+ * Unregisters XDomain service driver from the bus.
+ */
+void tb_unregister_service_driver(struct tb_service_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
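+
+/*
+ * Illustrative sketch only (not compiled here): the skeleton of a service
+ * driver binding to services with a given property key. The key "example"
+ * and the empty probe/remove bodies are hypothetical; the structures and
+ * the register/unregister calls are the real API.
+ */
+#if 0
+static const struct tb_service_id example_ids[] = {
+	{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY,
+	  .protocol_key = "example" },
+	{ },
+};
+
+static int example_probe(struct tb_service *svc,
+			 const struct tb_service_id *id)
+{
+	return 0;
+}
+
+static void example_remove(struct tb_service *svc)
+{
+}
+
+static struct tb_service_driver example_driver = {
+	.driver.name = "example",
+	.probe = example_probe,
+	.remove = example_remove,
+	.id_table = example_ids,
+};
+
+/* module_init: tb_register_service_driver(&example_driver); */
+/* module_exit: tb_unregister_service_driver(&example_driver); */
+#endif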
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	/*
+	 * It should be null terminated but anything else is pretty much
+	 * allowed.
+	 */
+	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+}
+static DEVICE_ATTR_RO(key);
+
+static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+{
+	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
+			svc->prtcid, svc->prtcvers, svc->prtcrevs);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	/* Full buffer size except new line and null termination */
+	get_modalias(svc, buf, PAGE_SIZE - 2);
+	return sprintf(buf, "%s\n", buf);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcid);
+}
+static DEVICE_ATTR_RO(prtcid);
+
+static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcvers);
+}
+static DEVICE_ATTR_RO(prtcvers);
+
+static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcrevs);
+}
+static DEVICE_ATTR_RO(prtcrevs);
+
+static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "0x%08x\n", svc->prtcstns);
+}
+static DEVICE_ATTR_RO(prtcstns);
+
+static struct attribute *tb_service_attrs[] = {
+	&dev_attr_key.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_prtcid.attr,
+	&dev_attr_prtcvers.attr,
+	&dev_attr_prtcrevs.attr,
+	&dev_attr_prtcstns.attr,
+	NULL,
+};
+
+static struct attribute_group tb_service_attr_group = {
+	.attrs = tb_service_attrs,
+};
+
+static const struct attribute_group *tb_service_attr_groups[] = {
+	&tb_service_attr_group,
+	NULL,
+};
+
+static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+	char modalias[64];
+
+	get_modalias(svc, modalias, sizeof(modalias));
+	return add_uevent_var(env, "MODALIAS=%s", modalias);
+}
+
+static void tb_service_release(struct device *dev)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+	struct tb_xdomain *xd = tb_service_parent(svc);
+
+	ida_simple_remove(&xd->service_ids, svc->id);
+	kfree(svc->key);
+	kfree(svc);
+}
+
+struct device_type tb_service_type = {
+	.name = "thunderbolt_service",
+	.groups = tb_service_attr_groups,
+	.uevent = tb_service_uevent,
+	.release = tb_service_release,
+};
+EXPORT_SYMBOL_GPL(tb_service_type);
+
+static int remove_missing_service(struct device *dev, void *data)
+{
+	struct tb_xdomain *xd = data;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return 0;
+
+	if (!tb_property_find(xd->properties, svc->key,
+			      TB_PROPERTY_TYPE_DIRECTORY))
+		device_unregister(dev);
+
+	return 0;
+}
+
+static int find_service(struct device *dev, void *data)
+{
+	const struct tb_property *p = data;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return 0;
+
+	return !strcmp(svc->key, p->key);
+}
+
+static int populate_service(struct tb_service *svc,
+			    struct tb_property *property)
+{
+	struct tb_property_dir *dir = property->value.dir;
+	struct tb_property *p;
+
+	/* Fill in standard properties */
+	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcid = p->value.immediate;
+	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcvers = p->value.immediate;
+	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcrevs = p->value.immediate;
+	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcstns = p->value.immediate;
+
+	svc->key = kstrdup(property->key, GFP_KERNEL);
+	if (!svc->key)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void enumerate_services(struct tb_xdomain *xd)
+{
+	struct tb_service *svc;
+	struct tb_property *p;
+	struct device *dev;
+
+	/*
+	 * First remove all services that are not available anymore in
+	 * the updated property block.
+	 */
+	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
+
+	/* Then re-enumerate properties, creating new services as we go */
+	tb_property_for_each(xd->properties, p) {
+		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
+			continue;
+
+		/* If the service exists already we are fine */
+		dev = device_find_child(&xd->dev, p, find_service);
+		if (dev) {
+			put_device(dev);
+			continue;
+		}
+
+		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+		if (!svc)
+			break;
+
+		if (populate_service(svc, p)) {
+			kfree(svc);
+			break;
+		}
+
+		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		svc->dev.bus = &tb_bus_type;
+		svc->dev.type = &tb_service_type;
+		svc->dev.parent = &xd->dev;
+		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+
+		if (device_register(&svc->dev)) {
+			put_device(&svc->dev);
+			break;
+		}
+	}
+}
+
+static int populate_properties(struct tb_xdomain *xd,
+			       struct tb_property_dir *dir)
+{
+	const struct tb_property *p;
+
+	/* Required properties */
+	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
+	if (!p)
+		return -EINVAL;
+	xd->device = p->value.immediate;
+
+	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
+	if (!p)
+		return -EINVAL;
+	xd->vendor = p->value.immediate;
+
+	kfree(xd->device_name);
+	xd->device_name = NULL;
+	kfree(xd->vendor_name);
+	xd->vendor_name = NULL;
+
+	/* Optional properties */
+	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
+	if (p)
+		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
+	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
+	if (p)
+		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
+
+	return 0;
+}
+
+/* Called with @xd->lock held */
+static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+{
+	if (!xd->resume)
+		return;
+
+	xd->resume = false;
+	if (xd->transmit_path) {
+		dev_dbg(&xd->dev, "re-establishing DMA path\n");
+		tb_domain_approve_xdomain_paths(xd->tb, xd);
+	}
+}
+
+static void tb_xdomain_get_properties(struct work_struct *work)
+{
+	struct tb_xdomain *xd = container_of(work, typeof(*xd),
+					     get_properties_work.work);
+	struct tb_property_dir *dir;
+	struct tb *tb = xd->tb;
+	bool update = false;
+	u32 *block = NULL;
+	u32 gen = 0;
+	int ret;
+
+	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
+					xd->remote_uuid, xd->properties_retries,
+					&block, &gen);
+	if (ret < 0) {
+		if (xd->properties_retries-- > 0) {
+			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+					   msecs_to_jiffies(1000));
+		} else {
+			/* Give up now */
+			dev_err(&xd->dev,
+				"failed to read XDomain properties from %pUb\n",
+				xd->remote_uuid);
+		}
+		return;
+	}
+
+	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+
+	mutex_lock(&xd->lock);
+
+	/* Only accept newer generation properties */
+	if (xd->properties && gen <= xd->property_block_gen) {
+		/*
+		 * On resume it is likely that the properties block is
+		 * not changed (unless the other end added or removed
+		 * services). However, we need to make sure the existing
+		 * DMA paths are restored properly.
+		 */
+		tb_xdomain_restore_paths(xd);
+		goto err_free_block;
+	}
+
+	dir = tb_property_parse_dir(block, ret);
+	if (!dir) {
+		dev_err(&xd->dev, "failed to parse XDomain properties\n");
+		goto err_free_block;
+	}
+
+	ret = populate_properties(xd, dir);
+	if (ret) {
+		dev_err(&xd->dev, "missing XDomain properties in response\n");
+		goto err_free_dir;
+	}
+
+	/* Release the existing one */
+	if (xd->properties) {
+		tb_property_free_dir(xd->properties);
+		update = true;
+	}
+
+	xd->properties = dir;
+	xd->property_block_gen = gen;
+
+	tb_xdomain_restore_paths(xd);
+
+	mutex_unlock(&xd->lock);
+
+	kfree(block);
+
+	/*
+	 * Now the device should be ready enough so we can add it to the
+	 * bus and let userspace know about it. If the device is already
+	 * registered, we notify userspace that it has changed.
+	 */
+	if (!update) {
+		if (device_add(&xd->dev)) {
+			dev_err(&xd->dev, "failed to add XDomain device\n");
+			return;
+		}
+	} else {
+		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+	}
+
+	enumerate_services(xd);
+	return;
+
+err_free_dir:
+	tb_property_free_dir(dir);
+err_free_block:
+	kfree(block);
+	mutex_unlock(&xd->lock);
+}
+
+static void tb_xdomain_properties_changed(struct work_struct *work)
+{
+	struct tb_xdomain *xd = container_of(work, typeof(*xd),
+					     properties_changed_work.work);
+	int ret;
+
+	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
+				xd->properties_changed_retries, xd->local_uuid);
+	if (ret) {
+		if (xd->properties_changed_retries-- > 0)
+			queue_delayed_work(xd->tb->wq,
+					   &xd->properties_changed_work,
+					   msecs_to_jiffies(1000));
+		return;
+	}
+
+	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sprintf(buf, "%#x\n", xd->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&xd->lock))
+		return -ERESTARTSYS;
+	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+	mutex_unlock(&xd->lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sprintf(buf, "%#x\n", xd->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&xd->lock))
+		return -ERESTARTSYS;
+	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+	mutex_unlock(&xd->lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sprintf(buf, "%pUb\n", xd->remote_uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *xdomain_attrs[] = {
+	&dev_attr_device.attr,
+	&dev_attr_device_name.attr,
+	&dev_attr_unique_id.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_vendor_name.attr,
+	NULL,
+};
+
+static struct attribute_group xdomain_attr_group = {
+	.attrs = xdomain_attrs,
+};
+
+static const struct attribute_group *xdomain_attr_groups[] = {
+	&xdomain_attr_group,
+	NULL,
+};
+
+static void tb_xdomain_release(struct device *dev)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	put_device(xd->dev.parent);
+
+	tb_property_free_dir(xd->properties);
+	ida_destroy(&xd->service_ids);
+
+	kfree(xd->local_uuid);
+	kfree(xd->remote_uuid);
+	kfree(xd->device_name);
+	kfree(xd->vendor_name);
+	kfree(xd);
+}
+
+static void start_handshake(struct tb_xdomain *xd)
+{
+	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+
+	/* Start exchanging properties with the other host */
+	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+			   msecs_to_jiffies(100));
+	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+			   msecs_to_jiffies(1000));
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+	xd->properties_retries = 0;
+	xd->properties_changed_retries = 0;
+
+	cancel_delayed_work_sync(&xd->get_properties_work);
+	cancel_delayed_work_sync(&xd->properties_changed_work);
+}
+
+static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+{
+	stop_handshake(tb_to_xdomain(dev));
+	return 0;
+}
+
+static int __maybe_unused tb_xdomain_resume(struct device *dev)
+{
+	struct tb_xdomain *xd = tb_to_xdomain(dev);
+
+	/*
+	 * Ask tb_xdomain_get_properties() to restore any existing DMA
+	 * paths after properties are re-read.
+	 */
+	xd->resume = true;
+	start_handshake(xd);
+
+	return 0;
+}
+
+static const struct dev_pm_ops tb_xdomain_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
+};
+
+struct device_type tb_xdomain_type = {
+	.name = "thunderbolt_xdomain",
+	.release = tb_xdomain_release,
+	.pm = &tb_xdomain_pm_ops,
+};
+EXPORT_SYMBOL_GPL(tb_xdomain_type);
+
+/**
+ * tb_xdomain_alloc() - Allocate new XDomain object
+ * @tb: Domain where the XDomain belongs
+ * @parent: Parent device (the switch through which the connection to the
+ *	    other domain is reached).
+ * @route: Route string used to reach the other domain
+ * @local_uuid: Our local domain UUID
+ * @remote_uuid: UUID of the other domain
+ *
+ * Allocates a new XDomain structure and returns a pointer to it. The
+ * object must be released by calling tb_xdomain_put().
+ */
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+				    u64 route, const uuid_t *local_uuid,
+				    const uuid_t *remote_uuid)
+{
+	struct tb_xdomain *xd;
+
+	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
+	if (!xd)
+		return NULL;
+
+	xd->tb = tb;
+	xd->route = route;
+	ida_init(&xd->service_ids);
+	mutex_init(&xd->lock);
+	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+	INIT_DELAYED_WORK(&xd->properties_changed_work,
+			  tb_xdomain_properties_changed);
+
+	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
+	if (!xd->local_uuid)
+		goto err_free;
+
+	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
+	if (!xd->remote_uuid)
+		goto err_free_local_uuid;
+
+	device_initialize(&xd->dev);
+	xd->dev.parent = get_device(parent);
+	xd->dev.bus = &tb_bus_type;
+	xd->dev.type = &tb_xdomain_type;
+	xd->dev.groups = xdomain_attr_groups;
+	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+
+	/*
+	 * This keeps the DMA powered on as long as we have an active
+	 * connection to another host.
+	 */
+	pm_runtime_set_active(&xd->dev);
+	pm_runtime_get_noresume(&xd->dev);
+	pm_runtime_enable(&xd->dev);
+
+	return xd;
+
+err_free_local_uuid:
+	kfree(xd->local_uuid);
+err_free:
+	kfree(xd);
+
+	return NULL;
+}
+
+/**
+ * tb_xdomain_add() - Add XDomain to the bus
+ * @xd: XDomain to add
+ *
+ * This function starts XDomain discovery protocol handshake and
+ * eventually adds the XDomain to the bus. After calling this function
+ * the caller needs to call tb_xdomain_remove() in order to remove and
+ * release the object regardless of whether the handshake succeeded or not.
+ */
+void tb_xdomain_add(struct tb_xdomain *xd)
+{
+	/* Start exchanging properties with the other host */
+	start_handshake(xd);
+}
+
+static int unregister_service(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+/**
+ * tb_xdomain_remove() - Remove XDomain from the bus
+ * @xd: XDomain to remove
+ *
+ * This will stop all ongoing configuration work and remove the XDomain
+ * along with any services from the bus. When the last reference to @xd
+ * is released the object will be released as well.
+ */
+void tb_xdomain_remove(struct tb_xdomain *xd)
+{
+	stop_handshake(xd);
+
+	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+
+	/*
+	 * Undo runtime PM here explicitly because it is possible that
+	 * the XDomain was never added to the bus and thus device_del()
+	 * is not called for it (device_del() would handle this otherwise).
+	 */
+	pm_runtime_disable(&xd->dev);
+	pm_runtime_put_noidle(&xd->dev);
+	pm_runtime_set_suspended(&xd->dev);
+
+	if (!device_is_registered(&xd->dev))
+		put_device(&xd->dev);
+	else
+		device_unregister(&xd->dev);
+}
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID of the transmit path the other end is using to
+ *		   send packets
+ * @transmit_ring: DMA ring used to receive packets from the other end
+ * @receive_path: HopID of the receive path the other end is using to
+ *		  receive packets
+ * @receive_ring: DMA ring used to send packets to the other end
+ *
+ * The function enables DMA paths accordingly so that after successful
+ * return the caller can send and receive packets using the high-speed
+ * DMA path.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
+			    u16 transmit_ring, u16 receive_path,
+			    u16 receive_ring)
+{
+	int ret;
+
+	mutex_lock(&xd->lock);
+
+	if (xd->transmit_path) {
+		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
+		goto exit_unlock;
+	}
+
+	xd->transmit_path = transmit_path;
+	xd->transmit_ring = transmit_ring;
+	xd->receive_path = receive_path;
+	xd->receive_ring = receive_ring;
+
+	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+
+exit_unlock:
+	mutex_unlock(&xd->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After a call to
+ * this function the caller is not expected to use the rings anymore.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+{
+	int ret = 0;
+
+	mutex_lock(&xd->lock);
+	if (xd->transmit_path) {
+		xd->transmit_path = 0;
+		xd->transmit_ring = 0;
+		xd->receive_path = 0;
+		xd->receive_ring = 0;
+
+		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
+	}
+	mutex_unlock(&xd->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
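+
+/*
+ * Illustrative sketch only (not compiled here): how a service driver might
+ * bring the DMA paths up and down. The HopID and ring numbers below are
+ * hypothetical examples; in practice they are agreed on between the two
+ * ends over the service's own XDomain protocol.
+ */
+#if 0
+static int example_start_dma(struct tb_xdomain *xd)
+{
+	return tb_xdomain_enable_paths(xd, 8 /* transmit path */,
+				       1 /* transmit ring */,
+				       8 /* receive path */,
+				       1 /* receive ring */);
+}
+
+static void example_stop_dma(struct tb_xdomain *xd)
+{
+	tb_xdomain_disable_paths(xd);
+}
+#endif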
+
+struct tb_xdomain_lookup {
+	const uuid_t *uuid;
+	u8 link;
+	u8 depth;
+	u64 route;
+};
+
+static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
+	const struct tb_xdomain_lookup *lookup)
+{
+	int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+		struct tb_xdomain *xd;
+
+		if (tb_is_upstream_port(port))
+			continue;
+
+		if (port->xdomain) {
+			xd = port->xdomain;
+
+			if (lookup->uuid) {
+				if (uuid_equal(xd->remote_uuid, lookup->uuid))
+					return xd;
+			} else if (lookup->link &&
+				   lookup->link == xd->link &&
+				   lookup->depth == xd->depth) {
+				return xd;
+			} else if (lookup->route &&
+				   lookup->route == xd->route) {
+				return xd;
+			}
+		} else if (port->remote) {
+			xd = switch_find_xdomain(port->remote->sw, lookup);
+			if (xd)
+				return xd;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
+ * @tb: Domain the XDomain belongs to
+ * @uuid: UUID to look for
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+	struct tb_xdomain_lookup lookup;
+	struct tb_xdomain *xd;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.uuid = uuid;
+
+	xd = switch_find_xdomain(tb->root_switch, &lookup);
+	return tb_xdomain_get(xd);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
+
+/**
+ * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
+ * @tb: Domain the XDomain belongs to
+ * @link: Root switch link number
+ * @depth: Depth in the link
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+						 u8 depth)
+{
+	struct tb_xdomain_lookup lookup;
+	struct tb_xdomain *xd;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.link = link;
+	lookup.depth = depth;
+
+	xd = switch_find_xdomain(tb->root_switch, &lookup);
+	return tb_xdomain_get(xd);
+}
+
+/**
+ * tb_xdomain_find_by_route() - Find an XDomain by route string
+ * @tb: Domain the XDomain belongs to
+ * @route: XDomain route string
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
+{
+	struct tb_xdomain_lookup lookup;
+	struct tb_xdomain *xd;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.route = route;
+
+	xd = switch_find_xdomain(tb->root_switch, &lookup);
+	return tb_xdomain_get(xd);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size)
+{
+	const struct tb_protocol_handler *handler, *tmp;
+	const struct tb_xdp_header *hdr = buf;
+	unsigned int length;
+	int ret = 0;
+
+	/* We expect the packet to be at least the size of the header */
+	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
+		return true;
+	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
+		return true;
+
+	/*
+	 * Handle XDomain discovery protocol packets directly here. For
+	 * other protocols (based on their UUID) we call registered
+	 * handlers in turn.
+	 */
+	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
+		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
+			tb_xdp_schedule_request(tb, hdr, size);
+			return true;
+		}
+		return false;
+	}
+
+	mutex_lock(&xdomain_lock);
+	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
+		if (!uuid_equal(&hdr->uuid, handler->uuid))
+			continue;
+
+		mutex_unlock(&xdomain_lock);
+		ret = handler->callback(buf, size, handler->data);
+		mutex_lock(&xdomain_lock);
+
+		if (ret)
+			break;
+	}
+	mutex_unlock(&xdomain_lock);
+
+	return ret > 0;
+}
+
+static int rebuild_property_block(void)
+{
+	u32 *block, len;
+	int ret;
+
+	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	len = ret;
+
+	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	ret = tb_property_format_dir(xdomain_property_dir, block, len);
+	if (ret) {
+		kfree(block);
+		return ret;
+	}
+
+	kfree(xdomain_property_block);
+	xdomain_property_block = block;
+	xdomain_property_block_len = len;
+	xdomain_property_block_gen++;
+
+	return 0;
+}
+
+static int update_xdomain(struct device *dev, void *data)
+{
+	struct tb_xdomain *xd;
+
+	xd = tb_to_xdomain(dev);
+	if (xd) {
+		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+				   msecs_to_jiffies(50));
+	}
+
+	return 0;
+}
+
+static void update_all_xdomains(void)
+{
+	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
+}
+
+static bool remove_directory(const char *key, const struct tb_property_dir *dir)
+{
+	struct tb_property *p;
+
+	p = tb_property_find(xdomain_property_dir, key,
+			     TB_PROPERTY_TYPE_DIRECTORY);
+	if (p && p->value.dir == dir) {
+		tb_property_remove(p);
+		return true;
+	}
+	return false;
+}
+
+/**
+ * tb_register_property_dir() - Register property directory to the host
+ * @key: Key (name) of the directory to add
+ * @dir: Directory to add
+ *
+ * Service drivers can use this function to add a new property directory
+ * to the host's available properties. The other connected hosts are
+ * notified so they can re-read the properties of this host if they are
+ * interested.
+ *
+ * Return: %0 on success and negative errno on failure
+ */
+int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
+{
+	int ret;
+
+	if (WARN_ON(!xdomain_property_dir))
+		return -EAGAIN;
+
+	if (!key || strlen(key) > 8)
+		return -EINVAL;
+
+	mutex_lock(&xdomain_lock);
+	if (tb_property_find(xdomain_property_dir, key,
+			     TB_PROPERTY_TYPE_DIRECTORY)) {
+		ret = -EEXIST;
+		goto err_unlock;
+	}
+
+	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
+	if (ret)
+		goto err_unlock;
+
+	ret = rebuild_property_block();
+	if (ret) {
+		remove_directory(key, dir);
+		goto err_unlock;
+	}
+
+	mutex_unlock(&xdomain_lock);
+	update_all_xdomains();
+	return 0;
+
+err_unlock:
+	mutex_unlock(&xdomain_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tb_register_property_dir);
+
+/**
+ * tb_unregister_property_dir() - Remove property directory from the host
+ * @key: Key (name) of the directory
+ * @dir: Directory to remove
+ *
+ * This will remove the existing directory from this host and notify the
+ * connected hosts about the change.
+ */
+void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
+{
+	int ret = 0;
+
+	mutex_lock(&xdomain_lock);
+	if (remove_directory(key, dir))
+		ret = rebuild_property_block();
+	mutex_unlock(&xdomain_lock);
+
+	if (!ret)
+		update_all_xdomains();
+}
+EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
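+
+/*
+ * Illustrative sketch only (not compiled here): how a service driver might
+ * publish its own property directory. The key "example" and the property
+ * values are hypothetical; the property and register calls are the real
+ * API. A real service typically passes its directory UUID instead of NULL.
+ */
+#if 0
+static struct tb_property_dir *example_dir;
+
+static int example_publish_properties(void)
+{
+	int ret;
+
+	example_dir = tb_property_create_dir(NULL);
+	if (!example_dir)
+		return -ENOMEM;
+
+	tb_property_add_immediate(example_dir, "prtcid", 1);
+	tb_property_add_immediate(example_dir, "prtcvers", 1);
+	tb_property_add_immediate(example_dir, "prtcrevs", 1);
+
+	/* Connected hosts are notified and will re-read our properties */
+	ret = tb_register_property_dir("example", example_dir);
+	if (ret)
+		tb_property_free_dir(example_dir);
+	return ret;
+}
+#endif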
+
+int tb_xdomain_init(void)
+{
+	int ret;
+
+	xdomain_property_dir = tb_property_create_dir(NULL);
+	if (!xdomain_property_dir)
+		return -ENOMEM;
+
+	/*
+	 * Initialize standard set of properties without any service
+	 * directories. Those will be added by service drivers
+	 * themselves when they are loaded.
+	 */
+	tb_property_add_immediate(xdomain_property_dir, "vendorid",
+				  PCI_VENDOR_ID_INTEL);
+	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
+	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+	tb_property_add_text(xdomain_property_dir, "deviceid",
+			     utsname()->nodename);
+	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
+
+	ret = rebuild_property_block();
+	if (ret) {
+		tb_property_free_dir(xdomain_property_dir);
+		xdomain_property_dir = NULL;
+	}
+
+	return ret;
+}
+
+void tb_xdomain_exit(void)
+{
+	kfree(xdomain_property_block);
+	tb_property_free_dir(xdomain_property_dir);
+}