Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c15c59..d658c7c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <jroedel@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) "iommu: " fmt
@@ -22,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
@@ -32,19 +21,15 @@
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
+#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
-#else
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
-#endif
-struct iommu_callback_data {
- const struct iommu_ops *ops;
-};
+static unsigned int iommu_def_domain_type __read_mostly;
+static bool iommu_dma_strict __read_mostly = true;
+static u32 iommu_cmd_line __read_mostly;
struct iommu_group {
struct kobject kobj;
@@ -74,12 +59,25 @@
};
static const char * const iommu_group_resv_type_string[] = {
- [IOMMU_RESV_DIRECT] = "direct",
- [IOMMU_RESV_RESERVED] = "reserved",
- [IOMMU_RESV_MSI] = "msi",
- [IOMMU_RESV_SW_MSI] = "msi",
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
};
+#define IOMMU_CMD_LINE_DMA_API BIT(0)
+
+static void iommu_set_cmd_line_dma_api(void)
+{
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
+}
+
+static bool iommu_cmd_line_dma_api(void)
+{
+ return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
+}
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -92,12 +90,55 @@
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
+/*
+ * Use a function instead of an array here because the domain-type is a
+ * bit-field, so an array would waste memory.
+ */
+static const char *iommu_domain_type_str(unsigned int t)
+{
+ switch (t) {
+ case IOMMU_DOMAIN_BLOCKED:
+ return "Blocked";
+ case IOMMU_DOMAIN_IDENTITY:
+ return "Passthrough";
+ case IOMMU_DOMAIN_UNMANAGED:
+ return "Unmanaged";
+ case IOMMU_DOMAIN_DMA:
+ return "Translated";
+ default:
+ return "Unknown";
+ }
+}
+
+static int __init iommu_subsys_init(void)
+{
+ bool cmd_line = iommu_cmd_line_dma_api();
+
+ if (!cmd_line) {
+ if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
+ iommu_set_default_passthrough(false);
+ else
+ iommu_set_default_translated(false);
+
+ if (iommu_default_passthrough() && mem_encrypt_active()) {
+ pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+ iommu_set_default_translated(false);
+ }
+ }
+
+ pr_info("Default domain type: %s %s\n",
+ iommu_domain_type_str(iommu_def_domain_type),
+ cmd_line ? "(set via kernel command line)" : "");
+
+ return 0;
+}
+subsys_initcall(iommu_subsys_init);
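
Usage note, not part of the patch: with the pr_fmt prefix defined above, iommu_subsys_init() emits a single boot log line; for example (illustrative, assuming a translated default chosen on the command line):

	iommu: Default domain type: Translated (set via kernel command line)
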
+
int iommu_device_register(struct iommu_device *iommu)
{
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
-
return 0;
}
@@ -108,6 +149,57 @@
spin_unlock(&iommu_device_lock);
}
+static struct iommu_param *iommu_get_dev_param(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+
+ if (param)
+ return param;
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
+
+ mutex_init(&param->lock);
+ dev->iommu_param = param;
+ return param;
+}
+
+static void iommu_free_dev_param(struct device *dev)
+{
+ kfree(dev->iommu_param);
+ dev->iommu_param = NULL;
+}
+
+int iommu_probe_device(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ int ret;
+
+ WARN_ON(dev->iommu_group);
+ if (!ops)
+ return -EINVAL;
+
+ if (!iommu_get_dev_param(dev))
+ return -ENOMEM;
+
+ ret = ops->add_device(dev);
+ if (ret)
+ iommu_free_dev_param(dev);
+
+ return ret;
+}
+
+void iommu_release_device(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (dev->iommu_group)
+ ops->remove_device(dev);
+
+ iommu_free_dev_param(dev);
+}
+
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
@@ -126,11 +218,21 @@
if (ret)
return ret;
- iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+ if (pt)
+ iommu_set_default_passthrough(true);
+ else
+ iommu_set_default_translated(true);
+
return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
+static int __init iommu_dma_setup(char *str)
+{
+ return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
+
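
Usage note, not part of the patch: both knobs are parsed with early_param(), so they are plain kernel command-line options. For example, booting with

	iommu.passthrough=0 iommu.strict=0

selects a translated (IOMMU_DOMAIN_DMA) default domain while relaxing invalidation to the flush-queue mode that iommu_group_get_for_dev() configures further down when iommu_dma_strict is false.
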
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -184,58 +286,58 @@
* @new: new region to insert
* @regions: list of regions
*
- * The new element is sorted by address with respect to the other
- * regions of the same type. In case it overlaps with another
- * region of the same type, regions are merged. In case it
- * overlaps with another region of different type, regions are
- * not merged.
+ * Elements are sorted by start address and overlapping segments
+ * of the same type are merged.
*/
-static int iommu_insert_resv_region(struct iommu_resv_region *new,
- struct list_head *regions)
+int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
{
- struct iommu_resv_region *region;
- phys_addr_t start = new->start;
- phys_addr_t end = new->start + new->length - 1;
- struct list_head *pos = regions->next;
+ struct iommu_resv_region *iter, *tmp, *nr, *top;
+ LIST_HEAD(stack);
- while (pos != regions) {
- struct iommu_resv_region *entry =
- list_entry(pos, struct iommu_resv_region, list);
- phys_addr_t a = entry->start;
- phys_addr_t b = entry->start + entry->length - 1;
- int type = entry->type;
-
- if (end < a) {
- goto insert;
- } else if (start > b) {
- pos = pos->next;
- } else if ((start >= a) && (end <= b)) {
- if (new->type == type)
- goto done;
- else
- pos = pos->next;
- } else {
- if (new->type == type) {
- phys_addr_t new_start = min(a, start);
- phys_addr_t new_end = max(b, end);
-
- list_del(&entry->list);
- entry->start = new_start;
- entry->length = new_end - new_start + 1;
- iommu_insert_resv_region(entry, regions);
- } else {
- pos = pos->next;
- }
- }
- }
-insert:
- region = iommu_alloc_resv_region(new->start, new->length,
- new->prot, new->type);
- if (!region)
+ nr = iommu_alloc_resv_region(new->start, new->length,
+ new->prot, new->type);
+ if (!nr)
return -ENOMEM;
- list_add_tail(&region->list, pos);
-done:
+ /* First add the new element based on start address sorting */
+ list_for_each_entry(iter, regions, list) {
+ if (nr->start < iter->start ||
+ (nr->start == iter->start && nr->type <= iter->type))
+ break;
+ }
+ list_add_tail(&nr->list, &iter->list);
+
+ /* Merge overlapping segments of type nr->type in @regions, if any */
+ list_for_each_entry_safe(iter, tmp, regions, list) {
+ phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+ /* no merge needed on elements of different types than @nr */
+ if (iter->type != nr->type) {
+ list_move_tail(&iter->list, &stack);
+ continue;
+ }
+
+ /* look for the last stack element of same type as @iter */
+ list_for_each_entry_reverse(top, &stack, list)
+ if (top->type == iter->type)
+ goto check_overlap;
+
+ list_move_tail(&iter->list, &stack);
+ continue;
+
+check_overlap:
+ top_end = top->start + top->length - 1;
+
+ if (iter->start > top_end + 1) {
+ list_move_tail(&iter->list, &stack);
+ } else {
+ top->length = max(top_end, iter_end) - top->start + 1;
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ list_splice(&stack, regions);
return 0;
}
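
Usage sketch, not part of the patch: a hypothetical driver's get_resv_regions() callback feeding this path, assuming the v5.4 iommu_alloc_resv_region() signature. Overlapping or adjacent regions of the same type reported by devices sharing a group end up merged into a single entry by iommu_insert_resv_region():

	static void mydrv_get_resv_regions(struct device *dev,
					   struct list_head *head)
	{
		struct iommu_resv_region *region;

		/* reserve a (hypothetical) 1MB MSI doorbell window */
		region = iommu_alloc_resv_region(0x8000000, 0x100000,
						 IOMMU_WRITE, IOMMU_RESV_MSI);
		if (!region)
			return;
		list_add_tail(&region->list, head);
	}
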
@@ -315,7 +417,7 @@
type = "unmanaged\n";
break;
case IOMMU_DOMAIN_DMA:
- type = "DMA";
+ type = "DMA\n";
break;
}
}
@@ -547,7 +649,8 @@
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
- if (entry->type != IOMMU_RESV_DIRECT)
+ if (entry->type != IOMMU_RESV_DIRECT &&
+ entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;
for (addr = start; addr < end; addr += pg_size) {
@@ -638,7 +741,7 @@
trace_add_device_to_group(group->id, dev);
- pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
+ dev_info(dev, "Adding to iommu group %d\n", group->id);
return 0;
@@ -654,7 +757,7 @@
sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
kfree(device);
- pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -671,7 +774,7 @@
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
- pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
+ dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */
blocking_notifier_call_chain(&group->notifier,
@@ -829,6 +932,206 @@
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
/**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ * page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+ iommu_dev_fault_handler_t handler,
+ void *data)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+ /* Only allow one fault handler registered for each device */
+ if (param->fault_param) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ get_device(dev);
+ param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+ if (!param->fault_param) {
+ put_device(dev);
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ param->fault_param->handler = handler;
+ param->fault_param->data = data;
+ mutex_init(&param->fault_param->lock);
+ INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
+
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+
+ if (!param->fault_param)
+ goto unlock;
+
+ /* we cannot unregister handler if there are pending faults */
+ if (!list_empty(&param->fault_param->faults)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ kfree(param->fault_param);
+ param->fault_param = NULL;
+ put_device(dev);
+unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_fault_event *evt_pending = NULL;
+ struct iommu_fault_param *fparam;
+ int ret = 0;
+
+ if (!param || !evt)
+ return -EINVAL;
+
+ /* we only report device fault if there is a handler registered */
+ mutex_lock(&param->lock);
+ fparam = param->fault_param;
+ if (!fparam || !fparam->handler) {
+ ret = -EINVAL;
+ goto done_unlock;
+ }
+
+ if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+ (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+ GFP_KERNEL);
+ if (!evt_pending) {
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ mutex_lock(&fparam->lock);
+ list_add_tail(&evt_pending->list, &fparam->faults);
+ mutex_unlock(&fparam->lock);
+ }
+
+ ret = fparam->handler(&evt->fault, fparam->data);
+ if (ret && evt_pending) {
+ mutex_lock(&fparam->lock);
+ list_del(&evt_pending->list);
+ mutex_unlock(&fparam->lock);
+ kfree(evt_pending);
+ }
+done_unlock:
+ mutex_unlock(&param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+
+int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg)
+{
+ bool pasid_valid;
+ int ret = -EINVAL;
+ struct iommu_fault_event *evt;
+ struct iommu_fault_page_request *prm;
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain || !domain->ops->page_response)
+ return -ENODEV;
+
+ if (!param || !param->fault_param)
+ return -EINVAL;
+
+ if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+ msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+ return -EINVAL;
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&param->fault_param->lock);
+ if (list_empty(&param->fault_param->faults)) {
+ dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+ goto done_unlock;
+ }
+ /*
+ * Check if we have a matching page request pending to respond,
+ * otherwise return -EINVAL
+ */
+ list_for_each_entry(evt, &param->fault_param->faults, list) {
+ prm = &evt->fault.prm;
+ pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+ if ((pasid_valid && prm->pasid != msg->pasid) ||
+ prm->grpid != msg->grpid)
+ continue;
+
+ /* Sanitize the reply */
+ msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+ ret = domain->ops->page_response(dev, evt, msg);
+ list_del(&evt->list);
+ kfree(evt);
+ break;
+ }
+
+done_unlock:
+ mutex_unlock(&param->fault_param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
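
Usage sketch, not part of the patch: a hypothetical consumer registering a handler and completing recoverable faults through iommu_page_response(). A real handler would usually defer this work rather than respond inline:

	static int mydrv_iommu_fault_handler(struct iommu_fault *fault, void *data)
	{
		struct device *dev = data;
		struct iommu_page_response resp;

		if (fault->type != IOMMU_FAULT_PAGE_REQ)
			return 0;	/* unrecoverable, nothing to complete */

		memset(&resp, 0, sizeof(resp));
		resp.version = IOMMU_PAGE_RESP_VERSION_1;
		resp.grpid = fault->prm.grpid;
		resp.pasid = fault->prm.pasid;
		resp.code = IOMMU_PAGE_RESP_SUCCESS;
		if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)
			resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

		return iommu_page_response(dev, &resp);
	}

	/* pairs with iommu_unregister_device_fault_handler(dev) on teardown */
	ret = iommu_register_device_fault_handler(dev, mydrv_iommu_fault_handler,
						  dev);
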
+
+/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -1024,6 +1327,18 @@
return iommu_group_alloc();
}
+/* Get the IOMMU group for device on fsl-mc bus */
+struct iommu_group *fsl_mc_device_group(struct device *dev)
+{
+ struct device *cont_dev = fsl_mc_cont_dev(dev);
+ struct iommu_group *group;
+
+ group = iommu_group_get(cont_dev);
+ if (!group)
+ group = iommu_group_alloc();
+ return group;
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1063,15 +1378,24 @@
dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ if (dom) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ }
}
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
+
+ if (dom && !iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
}
ret = iommu_group_add_device(group, dev);
@@ -1090,16 +1414,7 @@
static int add_iommu_group(struct device *dev, void *data)
{
- struct iommu_callback_data *cb = data;
- const struct iommu_ops *ops = cb->ops;
- int ret;
-
- if (!ops->add_device)
- return 0;
-
- WARN_ON(dev->iommu_group);
-
- ret = ops->add_device(dev);
+ int ret = iommu_probe_device(dev);
/*
* We ignore -ENODEV errors for now, as they just mean that the
@@ -1114,11 +1429,7 @@
static int remove_iommu_group(struct device *dev, void *data)
{
- struct iommu_callback_data *cb = data;
- const struct iommu_ops *ops = cb->ops;
-
- if (ops->remove_device && dev->iommu_group)
- ops->remove_device(dev);
+ iommu_release_device(dev);
return 0;
}
@@ -1126,27 +1437,22 @@
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct device *dev = data;
- const struct iommu_ops *ops = dev->bus->iommu_ops;
- struct iommu_group *group;
unsigned long group_action = 0;
+ struct device *dev = data;
+ struct iommu_group *group;
/*
* ADD/DEL call into iommu driver ops if provided, which may
* result in ADD/DEL notifiers to group->notifier
*/
if (action == BUS_NOTIFY_ADD_DEVICE) {
- if (ops->add_device) {
- int ret;
+ int ret;
- ret = ops->add_device(dev);
- return (ret) ? NOTIFY_DONE : NOTIFY_OK;
- }
+ ret = iommu_probe_device(dev);
+ return (ret) ? NOTIFY_DONE : NOTIFY_OK;
} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
- if (ops->remove_device && dev->iommu_group) {
- ops->remove_device(dev);
- return 0;
- }
+ iommu_release_device(dev);
+ return NOTIFY_OK;
}
/*
@@ -1184,9 +1490,6 @@
{
int err;
struct notifier_block *nb;
- struct iommu_callback_data cb = {
- .ops = ops,
- };
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
@@ -1198,7 +1501,7 @@
if (err)
goto out_free;
- err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+ err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
if (err)
goto out_err;
@@ -1207,7 +1510,7 @@
out_err:
/* Clean up */
- bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+ bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
bus_unregister_notifier(bus, nb);
out_free:
@@ -1416,7 +1719,16 @@
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
- * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * For IOMMU_DOMAIN_DMA implementations which already provide their own
+ * guarantees that the group and its default domain are valid and correct.
+ */
+struct iommu_domain *iommu_get_dma_domain(struct device *dev)
+{
+ return dev->iommu_group->default_domain;
+}
+
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
* iterating over the devices in a group. Ideally we'd have a single
* device which represents the requestor ID of the group, but we also
@@ -1545,13 +1857,14 @@
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
+ const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
phys_addr_t orig_paddr = paddr;
int ret = 0;
- if (unlikely(domain->ops->map == NULL ||
+ if (unlikely(ops->map == NULL ||
domain->pgsize_bitmap == 0UL))
return -ENODEV;
@@ -1580,7 +1893,7 @@
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1589,6 +1902,9 @@
size -= pgsize;
}
+ if (ops->iotlb_sync_map)
+ ops->iotlb_sync_map(domain);
+
/* unroll mapping in case something went wrong */
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
@@ -1601,7 +1917,7 @@
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
- bool sync)
+ struct iommu_iotlb_gather *iotlb_gather)
{
const struct iommu_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
@@ -1638,13 +1954,10 @@
while (unmapped < size) {
size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
- unmapped_page = ops->unmap(domain, iova, pgsize);
+ unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
if (!unmapped_page)
break;
- if (sync && ops->iotlb_range_add)
- ops->iotlb_range_add(domain, iova, pgsize);
-
pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
iova, unmapped_page);
@@ -1652,9 +1965,6 @@
unmapped += unmapped_page;
}
- if (sync && ops->iotlb_sync)
- ops->iotlb_sync(domain);
-
trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
@@ -1662,47 +1972,54 @@
size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- return __iommu_unmap(domain, iova, size, true);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t ret;
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
+ iommu_tlb_sync(domain, &iotlb_gather);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
size_t iommu_unmap_fast(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- return __iommu_unmap(domain, iova, size, false);
+ return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
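
Usage sketch, not part of the patch: the gather object lets a caller batch invalidations across several unmaps and pay for a single TLB sync, which is exactly what iommu_unmap() above now does internally (iova_a/size_a etc. are illustrative):

	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;

	iommu_iotlb_gather_init(&gather);
	unmapped += iommu_unmap_fast(domain, iova_a, size_a, &gather);
	unmapped += iommu_unmap_fast(domain, iova_b, size_b, &gather);
	iommu_tlb_sync(domain, &gather);	/* one flush for both ranges */
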
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
- struct scatterlist *s;
- size_t mapped = 0;
- unsigned int i, min_pagesz;
+ size_t len = 0, mapped = 0;
+ phys_addr_t start;
+ unsigned int i = 0;
int ret;
- if (unlikely(domain->pgsize_bitmap == 0UL))
- return 0;
+ while (i <= nents) {
+ phys_addr_t s_phys = sg_phys(sg);
- min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
+ if (len && s_phys != start + len) {
+ ret = iommu_map(domain, iova + mapped, start, len, prot);
+ if (ret)
+ goto out_err;
- for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+ mapped += len;
+ len = 0;
+ }
- /*
- * We are mapping on IOMMU page boundaries, so offset within
- * the page must be 0. However, the IOMMU may support pages
- * smaller than PAGE_SIZE, so s->offset may still represent
- * an offset of that boundary within the CPU page.
- */
- if (!IS_ALIGNED(s->offset, min_pagesz))
- goto out_err;
+ if (len) {
+ len += sg->length;
+ } else {
+ len = sg->length;
+ start = s_phys;
+ }
- ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
- if (ret)
- goto out_err;
-
- mapped += s->length;
+ if (++i < nents)
+ sg = sg_next(sg);
}
return mapped;
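
Usage sketch, not part of the patch: the caller-visible contract is unchanged; only the number of ops->map() calls drops when scatterlist segments are physically contiguous. Assuming the out_err path (outside this hunk's context) still unwinds partial mappings, a caller just checks for a short return (sgt/total_len are illustrative):

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (mapped < total_len)
		return -ENOMEM;
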
@@ -1796,7 +2113,6 @@
struct iommu_domain_geometry *geometry;
bool *paging;
int ret = 0;
- u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -1808,15 +2124,6 @@
paging = data;
*paging = (domain->pgsize_bitmap != 0UL);
break;
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_get_windows != NULL)
- *count = domain->ops->domain_get_windows(domain);
- else
- ret = -ENODEV;
-
- break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -1832,18 +2139,8 @@
enum iommu_attr attr, void *data)
{
int ret = 0;
- u32 *count;
switch (attr) {
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_set_windows != NULL)
- ret = domain->ops->domain_set_windows(domain, *count);
- else
- ret = -ENODEV;
-
- break;
default:
if (domain->ops->domain_set_attr == NULL)
return -EINVAL;
@@ -1889,24 +2186,22 @@
return region;
}
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
+static int
+request_default_domain_for_dev(struct device *dev, unsigned long type)
{
- struct iommu_domain *dm_domain;
+ struct iommu_domain *domain;
struct iommu_group *group;
int ret;
/* Device must already be in a group before calling this function */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ group = iommu_group_get(dev);
+ if (!group)
+ return -EINVAL;
mutex_lock(&group->mutex);
- /* Check if the default domain is already direct mapped */
ret = 0;
- if (group->default_domain &&
- group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (group->default_domain && group->default_domain->type == type)
goto out;
/* Don't change mappings of existing devices */
@@ -1914,25 +2209,27 @@
if (iommu_group_device_count(group) != 1)
goto out;
- /* Allocate a direct mapped domain */
ret = -ENOMEM;
- dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
- if (!dm_domain)
+ domain = __iommu_domain_alloc(dev->bus, type);
+ if (!domain)
goto out;
/* Attach the device to the domain */
- ret = __iommu_attach_group(dm_domain, group);
+ ret = __iommu_attach_group(domain, group);
if (ret) {
- iommu_domain_free(dm_domain);
+ iommu_domain_free(domain);
goto out;
}
- /* Make the direct mapped domain the default for this group */
+ iommu_group_create_direct_mappings(group, dev);
+
+ /* Make the domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
- group->default_domain = dm_domain;
+ group->default_domain = domain;
- pr_info("Using direct mapping for device %s\n", dev_name(dev));
+ dev_info(dev, "Using iommu %s mapping\n",
+ type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
ret = 0;
out:
@@ -1942,6 +2239,40 @@
return ret;
}
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
+}
+
+/* Request that a device can't be direct mapped by the IOMMU */
+int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
+}
+
+void iommu_set_default_passthrough(bool cmd_line)
+{
+ if (cmd_line)
+ iommu_set_cmd_line_dma_api();
+
+ iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+}
+
+void iommu_set_default_translated(bool cmd_line)
+{
+ if (cmd_line)
+ iommu_set_cmd_line_dma_api();
+
+ iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+}
+
+bool iommu_default_passthrough(void)
+{
+ return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
+}
+EXPORT_SYMBOL_GPL(iommu_default_passthrough);
+
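
Usage sketch, not part of the patch: these helpers are intended for arch or IOMMU-driver init code overriding the build-time default; passing cmd_line=false (as iommu_subsys_init() does above) leaves the "set via kernel command line" flag clear:

	/* hypothetical driver init deciding the hardware can't do passthrough */
	iommu_set_default_translated(false);
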
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
@@ -1960,7 +2291,7 @@
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (fwspec)
return ops == fwspec->ops ? 0 : -EINVAL;
@@ -1972,26 +2303,26 @@
of_node_get(to_of_node(iommu_fwnode));
fwspec->iommu_fwnode = iommu_fwnode;
fwspec->ops = ops;
- dev->iommu_fwspec = fwspec;
+ dev_iommu_fwspec_set(dev, fwspec);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);
void iommu_fwspec_free(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (fwspec) {
fwnode_handle_put(fwspec->iommu_fwnode);
kfree(fwspec);
- dev->iommu_fwspec = NULL;
+ dev_iommu_fwspec_set(dev, NULL);
}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
size_t size;
int i;
@@ -2000,11 +2331,11 @@
size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
if (size > sizeof(*fwspec)) {
- fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+ fwspec = krealloc(fwspec, size, GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
- dev->iommu_fwspec = fwspec;
+ dev_iommu_fwspec_set(dev, fwspec);
}
for (i = 0; i < num_ids; i++)
@@ -2014,3 +2345,203 @@
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_has_feat)
+ return ops->dev_has_feat(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_enable_feat)
+ return ops->dev_enable_feat(dev, feat);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+
+/*
+ * The device drivers should do the necessary cleanups before calling this.
+ * For example, before disabling the aux-domain feature, the device driver
+ * should detach all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_disable_feat)
+ return ops->dev_disable_feat(dev, feat);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (ops && ops->dev_feat_enabled)
+ return ops->dev_feat_enabled(dev, feat);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any tries to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_attach_dev)
+ ret = domain->ops->aux_attach_dev(domain, dev);
+
+ if (!ret)
+ trace_attach_device_to_domain(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+ if (domain->ops->aux_detach_dev) {
+ domain->ops->aux_detach_dev(domain, dev);
+ trace_detach_device_from_domain(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = -ENODEV;
+
+ if (domain->ops->aux_get_pasid)
+ ret = domain->ops->aux_get_pasid(domain, dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
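
Usage sketch, not part of the patch: a hypothetical sequence for attaching an aux domain and retrieving the PASID to program into the device (error handling abbreviated):

	struct iommu_domain *domain;
	int pasid;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	if (iommu_aux_attach_device(domain, dev))
		goto err_free;

	pasid = iommu_aux_get_pasid(domain, dev);
	/* ... tag the device's DMA with @pasid ... */

	iommu_aux_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
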
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer passed to the IOMMU driver's bind callback
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @dev and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_group *group;
+ struct iommu_sva *handle = ERR_PTR(-EINVAL);
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_bind)
+ return ERR_PTR(-ENODEV);
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ /* Ensure device count and domain don't change while we're binding */
+ mutex_lock(&group->mutex);
+
+ /*
+ * To keep things simple, SVA currently doesn't support IOMMU groups
+ * with more than one device. Existing SVA-capable systems are not
+ * affected by the problems that required IOMMU groups (lack of ACS
+ * isolation, device ID aliasing and other hardware issues).
+ */
+ if (iommu_group_device_count(group) != 1)
+ goto out_unlock;
+
+ handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device()
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transactions for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+ struct iommu_group *group;
+ struct device *dev = handle->dev;
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_unbind)
+ return;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return;
+
+ mutex_lock(&group->mutex);
+ ops->sva_unbind(handle);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+ const struct iommu_sva_ops *sva_ops)
+{
+ if (handle->ops && handle->ops != sva_ops)
+ return -EEXIST;
+
+ handle->ops = sva_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+ if (!ops || !ops->sva_get_pasid)
+ return IOMMU_PASID_INVALID;
+
+ return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
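
Usage sketch, not part of the patch: the full SVA lifecycle from a driver's perspective (hypothetical call site; a production driver would also supply iommu_sva_ops via iommu_sva_set_ops() to be notified of mm exits):

	struct iommu_sva *handle;
	int ret, pasid;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID)
		goto out_unbind;

	/* ... device issues PASID-tagged DMA against current->mm ... */

out_unbind:
	iommu_sva_unbind_device(handle);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);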