Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 29bac53..e90c267 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -30,12 +31,6 @@
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
-/*
- * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
- * to set correct cacheability attributes to use an outer level of cache -
- * last level cache, aka system cache.
- */
-#define IOMMU_QCOM_SYS_CACHE (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -52,8 +47,6 @@
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
- void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
@@ -170,32 +163,13 @@
#define IOMMU_PASID_INVALID (-1U)
-/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- * @mm_exit returns, the device must not issue any more transaction
- * with the PASID given as argument.
- *
- * The @mm_exit handler is allowed to sleep. Be careful about the
- * locks taken in @mm_exit, because they might lead to deadlocks if
- * they are also held when dropping references to the mm. Consider the
- * following call chain:
- * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- * Using mmput_async() prevents this scenario.
- *
- */
-struct iommu_sva_ops {
- iommu_mm_exit_handler_t mm_exit;
-};
-
#ifdef CONFIG_IOMMU_API
/**
* struct iommu_iotlb_gather - Range information for a pending IOTLB flush
*
* @start: IOVA representing the start of the range to be flushed
- * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
*
* This structure is intended to be updated by multiple calls to the
@@ -222,8 +196,10 @@
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the groups domain
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
@@ -244,7 +220,15 @@
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
+ * @cache_invalidate: invalidate translation caches
+ * @sva_bind_gpasid: bind guest pasid and mm
+ * @sva_unbind_gpasid: unbind guest pasid and mm
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -256,7 +240,7 @@
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
@@ -264,8 +248,9 @@
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
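The add_device()/remove_device() pair is gone from struct iommu_ops; drivers now implement probe_device()/release_device(), plus the optional probe_finalize() and def_domain_type() hooks documented above. A minimal sketch of the new shape, for orientation only; the mydrv_* names and the instance lookup are hypothetical:

    /* Hypothetical per-instance state of an example IOMMU driver. */
    struct mydrv_data {
            struct iommu_device iommu;
            /* ... hardware state ... */
    };

    static struct iommu_device *mydrv_probe_device(struct device *dev)
    {
            /* Hypothetical lookup of the IOMMU instance serving @dev. */
            struct mydrv_data *data = mydrv_find_instance(dev);

            if (!data)
                    return ERR_PTR(-ENODEV);

            /* Per-device driver data now lives behind dev->iommu->priv
             * (see the dev_iommu_priv_set() helper added further down). */
            dev_iommu_priv_set(dev, data);

            return &data->iommu;
    }

    static void mydrv_release_device(struct device *dev)
    {
            dev_iommu_priv_set(dev, NULL);
    }

    static struct iommu_ops mydrv_ops = {
            .probe_device   = mydrv_probe_device,
            .release_device = mydrv_release_device,
            /* .probe_finalize, .def_domain_type, ... as required */
    };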
@@ -301,13 +286,22 @@
struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
void *drvdata);
void (*sva_unbind)(struct iommu_sva *handle);
- int (*sva_get_pasid)(struct iommu_sva *handle);
+ u32 (*sva_get_pasid)(struct iommu_sva *handle);
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+ int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+ int (*sva_bind_gpasid)(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+
+ int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);
+
+ int (*def_domain_type)(struct device *dev);
unsigned long pgsize_bitmap;
+ struct module *owner;
};
/**
@@ -353,17 +347,22 @@
};
/**
- * struct iommu_param - collection of per-device IOMMU data
+ * struct dev_iommu - Collection of per-device IOMMU data
*
* @fault_param: IOMMU detected device fault reporting data
+ * @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
+ * @priv: IOMMU Driver private data
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
- * struct iommu_fwspec *iommu_fwspec;
*/
-struct iommu_param {
+struct dev_iommu {
struct mutex lock;
- struct iommu_fault_param *fault_param;
+ struct iommu_fault_param *fault_param;
+ struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
+ void *priv;
};
int iommu_device_register(struct iommu_device *iommu);
@@ -376,12 +375,19 @@
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
+ const struct iommu_ops *ops)
{
iommu->ops = ops;
}
+#define iommu_device_set_ops(iommu, ops) \
+do { \
+ struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
+ __ops->owner = THIS_MODULE; \
+ __iommu_device_set_ops(iommu, __ops); \
+} while (0)
+
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
@@ -408,6 +414,7 @@
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
+extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
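iommu_device_set_ops() above is now a macro so that it can record THIS_MODULE in the new @owner field of the ops, which lets the core take a reference on the driver module while its ops are in use. Registration itself is otherwise unchanged; a hypothetical probe path, continuing the mydrv_* sketch above:

    static int mydrv_register(struct mydrv_data *data, struct device *dev)
    {
            /* Also writes THIS_MODULE into mydrv_ops.owner. */
            iommu_device_set_ops(&data->iommu, &mydrv_ops);
            iommu_device_set_fwnode(&data->iommu, dev->fwnode);

            return iommu_device_register(&data->iommu);
    }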
@@ -417,10 +424,22 @@
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ void __user *uinfo);
+
+extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, void __user *udata);
+extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, void __user *udata);
+extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -428,14 +447,17 @@
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
+extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
-extern int iommu_request_dma_domain_for_dev(struct device *dev);
+extern void generic_iommu_put_resv_regions(struct device *dev,
+ struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
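iommu_map_atomic() and iommu_map_sg_atomic() mirror iommu_map()/iommu_map_sg() but are intended for callers that cannot sleep, which is also why the map op above gained a gfp_t argument. A small, hypothetical wrapper to illustrate the intent:

    /* Hypothetical: map one page from a context where sleeping is not allowed. */
    static int mydrv_map_one_page(struct iommu_domain *domain,
                                  unsigned long iova, phys_addr_t paddr)
    {
            return iommu_map_atomic(domain, iova, paddr, PAGE_SIZE,
                                    IOMMU_READ | IOMMU_WRITE);
    }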
@@ -479,7 +501,6 @@
struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
@@ -496,13 +517,13 @@
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
if (domain->ops->flush_iotlb_all)
domain->ops->flush_iotlb_all(domain);
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
if (domain->ops->iotlb_sync)
@@ -515,7 +536,7 @@
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size;
+ unsigned long start = iova, end = start + size - 1;
/*
* If the new page is disjoint from the current range or is mapped at
@@ -523,9 +544,9 @@
* structure can be rewritten.
*/
if (gather->pgsize != size ||
- end < gather->start || start > gather->end) {
+ end + 1 < gather->start || start > gather->end + 1) {
if (gather->pgsize)
- iommu_tlb_sync(domain, gather);
+ iommu_iotlb_sync(domain, gather);
gather->pgsize = size;
}
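Two related changes meet here: the sync helpers are renamed to iommu_flush_iotlb_all()/iommu_iotlb_sync(), and the gather range becomes inclusive, so end = start + size - 1 can no longer wrap to zero for a range ending at the top of the IOVA space (the +1 terms in the disjointness check compensate for the inclusive bound). The intended calling pattern, sketched with hypothetical names and assuming the iommu_iotlb_gather_init() helper defined elsewhere in this header:

    static size_t mydrv_unmap_range(struct iommu_domain *domain,
                                    unsigned long iova, size_t size)
    {
            struct iommu_iotlb_gather gather;
            size_t unmapped;

            iommu_iotlb_gather_init(&gather);

            /* The driver's unmap path accumulates ranges into @gather via
             * iommu_iotlb_gather_add_page(). */
            unmapped = iommu_unmap_fast(domain, iova, size, &gather);

            /* Flush everything queued in @gather, under the new name. */
            iommu_iotlb_sync(domain, &gather);

            return unmapped;
    }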
@@ -548,16 +569,17 @@
* @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
* @iommu_priv: IOMMU driver private data for this device
+ * @num_pasid_bits: number of PASID bits supported by this device
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
*/
struct iommu_fwspec {
const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
- void *iommu_priv;
u32 flags;
+ u32 num_pasid_bits;
unsigned int num_ids;
- u32 ids[1];
+ u32 ids[];
};
/* ATS is supported */
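struct iommu_fwspec loses its iommu_priv pointer (per-device private data moves to dev_iommu, see below), gains num_pasid_bits, and its ids[] array becomes a flexible array member. Consumers keep reading it the same way; a hypothetical walk of the firmware-provided IDs:

    static void mydrv_dump_fwspec(struct device *dev)
    {
            struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
            unsigned int i;

            if (!fwspec)
                    return;

            for (i = 0; i < fwspec->num_ids; i++)
                    dev_dbg(dev, "id[%u] = 0x%x\n", i, fwspec->ids[i]);

            /* New field: PASID width advertised by firmware. */
            dev_dbg(dev, "pasid bits: %u\n", fwspec->num_pasid_bits);
    }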
@@ -568,7 +590,6 @@
*/
struct iommu_sva {
struct device *dev;
- const struct iommu_sva_ops *ops;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -579,13 +600,29 @@
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
- return dev->iommu_fwspec;
+ if (dev->iommu)
+ return dev->iommu->fwspec;
+ else
+ return NULL;
}
static inline void dev_iommu_fwspec_set(struct device *dev,
struct iommu_fwspec *fwspec)
{
- dev->iommu_fwspec = fwspec;
+ dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
+}
+
+static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+ dev->iommu->priv = priv;
}
int iommu_probe_device(struct device *dev);
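dev_iommu_fwspec_get()/dev_iommu_fwspec_set() now route through dev->iommu, and the new dev_iommu_priv_get()/dev_iommu_priv_set() pair replaces what used to sit in fwspec->iommu_priv. A hypothetical callback retrieving the state that the probe_device() sketch above stored:

    static int mydrv_attach_dev(struct iommu_domain *domain, struct device *dev)
    {
            /* Stored from mydrv_probe_device() via dev_iommu_priv_set(). */
            struct mydrv_data *data = dev_iommu_priv_get(dev);

            if (!data)
                    return -ENODEV;

            /* ... program the hardware to attach @dev to @domain ... */
            return 0;
    }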
@@ -603,9 +640,7 @@
struct mm_struct *mm,
void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops);
-int iommu_sva_get_pasid(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else /* CONFIG_IOMMU_API */
@@ -662,6 +697,13 @@
return -ENODEV;
}
+static inline int iommu_map_atomic(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -682,11 +724,18 @@
return 0;
}
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return 0;
+}
+
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
}
@@ -729,16 +778,6 @@
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
static inline void iommu_set_default_passthrough(bool cmd_line)
{
}
@@ -994,19 +1033,60 @@
{
}
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops)
-{
- return -EINVAL;
-}
-
-static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
return IOMMU_PASID_INVALID;
}
+static inline int
+iommu_uapi_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, void __user *udata)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, void __user *udata)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev,
+ ioasid_t pasid)
+{
+ return -ENODEV;
+}
+
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+ return NULL;
+}
#endif /* CONFIG_IOMMU_API */
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
+
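A short usage sketch for the new helper; the wrapper and its error policy are illustrative only:

    /* Hypothetical: map a buffer already described by an sg_table. */
    static int mydrv_map_buffer(struct iommu_domain *domain, unsigned long iova,
                                struct sg_table *sgt, size_t len)
    {
            size_t mapped = iommu_map_sgtable(domain, iova, sgt,
                                              IOMMU_READ | IOMMU_WRITE);

            /* iommu_map_sg() returns the number of bytes actually mapped. */
            return mapped == len ? 0 : -ENOMEM;
    }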
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);