Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 133ba78..1bd8e6a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
-#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
@@ -11,11 +10,42 @@
struct mmu_notifier;
struct mmu_notifier_ops;
-/* mmu_notifier_ops flags */
-#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
+/**
+ * enum mmu_notifier_event - reason for the mmu notifier callback
+ * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
+ * that moves the range
+ *
+ * @MMU_NOTIFY_CLEAR: clear page table entry (there are many reasons for this,
+ * such as madvise() or replacing a page with another one, ...).
+ *
+ * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
+ * range, i.e. using the vma access permissions (vm_page_prot) to update the
+ * whole range is enough; there is no need to inspect changes to the CPU page
+ * table (mprotect() syscall)
+ *
+ * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
+ * flags for pages in the range, so to mirror those changes the user must
+ * inspect the CPU page table (from the end callback).
+ *
+ * @MMU_NOTIFY_SOFT_DIRTY: soft-dirty accounting (still the same page and the
+ * same access flags). The user should soft-dirty the page in the end callback
+ * to make sure that anyone relying on soft dirtiness catches pages that might
+ * be written through non-CPU mappings.
+ */
+enum mmu_notifier_event {
+ MMU_NOTIFY_UNMAP = 0,
+ MMU_NOTIFY_CLEAR,
+ MMU_NOTIFY_PROTECTION_VMA,
+ MMU_NOTIFY_PROTECTION_PAGE,
+ MMU_NOTIFY_SOFT_DIRTY,
+};
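
As a sketch of how a consumer is expected to use the event type, a
hypothetical mirror driver might dispatch on it like this (the function
name is an illustrative assumption, not part of this patch):

static void my_mirror_handle_event(enum mmu_notifier_event event,
				   unsigned long start, unsigned long end)
{
	switch (event) {
	case MMU_NOTIFY_UNMAP:
	case MMU_NOTIFY_CLEAR:
		/* Mapping is going away or being replaced: drop mirrored pages. */
		pr_debug("drop mirror %lx-%lx\n", start, end);
		break;
	case MMU_NOTIFY_PROTECTION_VMA:
		/* vm_page_prot covers the whole range; no PTE walk needed. */
		pr_debug("update protection %lx-%lx\n", start, end);
		break;
	case MMU_NOTIFY_PROTECTION_PAGE:
	case MMU_NOTIFY_SOFT_DIRTY:
		/* Per-page changes: re-inspect the CPU page table afterwards. */
		pr_debug("refresh from CPU page table %lx-%lx\n", start, end);
		break;
	}
}
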
#ifdef CONFIG_MMU_NOTIFIER
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
/*
* The mmu notifier_mm structure is allocated and installed in
* mm->mmu_notifier_mm inside the mm_take_all_locks() protected
@@ -29,16 +59,18 @@
spinlock_t lock;
};
-struct mmu_notifier_ops {
- /*
- * Flags to specify behavior of callbacks for this MMU notifier.
- * Used to determine which context an operation may be called.
- *
- * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
- * block
- */
- int flags;
+#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
+struct mmu_notifier_range {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+};
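
For illustration, a notifier callback is expected to consult the flags word
before taking any sleeping lock. A minimal sketch, assuming a driver-side
my_lock mutex (the mmu_notifier_range_blockable() helper added further down
wraps the same test):

static DEFINE_MUTEX(my_lock);

static int my_notifier_start(struct mmu_notifier *mn,
			     const struct mmu_notifier_range *range)
{
	/* Same test as the mmu_notifier_range_blockable() helper. */
	if (!(range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE)) {
		/* Non-blocking context: must not sleep, so only trylock. */
		if (!mutex_trylock(&my_lock))
			return -EAGAIN;
	} else {
		mutex_lock(&my_lock);
	}
	/* ... invalidate the mirror for [range->start, range->end) ... */
	mutex_unlock(&my_lock);
	return 0;
}
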
+
+struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
@@ -153,16 +185,15 @@
*
* If blockable argument is set to false then the callback cannot
* sleep and has to return with -EAGAIN. 0 should be returned
- * otherwise.
+ * otherwise. Note that if invalidate_range_start succeeds in
+ * non-blocking mode, the same constraint applies to
+ * invalidate_range_end: it must not block either.
*
*/
int (*invalidate_range_start)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end,
- bool blockable);
+ const struct mmu_notifier_range *range);
void (*invalidate_range_end)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
/*
* invalidate_range() is either called between
@@ -181,13 +212,22 @@
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
- *
- * If this callback cannot block, and invalidate_range_{start,end}
- * cannot block, mmu_notifier_ops.flags should have
- * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
+
+ /*
+ * These callbacks are used with the get/put interface to manage the
+ * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
+ * notifier for use with the mm.
+ *
+ * free_notifier() is only called after the mmu_notifier has been
+ * fully put; calls to any ops callback are prevented and no ops
+ * callbacks are currently running. It is called from an SRCU
+ * callback and cannot sleep.
+ */
+ struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
+ void (*free_notifier)(struct mmu_notifier *mn);
};
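
To make the get/put lifetime model concrete, a hypothetical ops table could
be wired up as follows (my_notifier_start is the sketch from earlier; the
other names are likewise illustrative assumptions):

/* Illustrative only: a notifier embedded in driver-private state. */
struct my_notifier {
	struct mmu_notifier mn;
	/* ... driver state ... */
};

static void my_notifier_end(struct mmu_notifier *mn,
			    const struct mmu_notifier_range *range)
{
	/* Nothing to do in this sketch. */
}

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);
	return &p->mn;
}

static void my_free_notifier(struct mmu_notifier *mn)
{
	/* Runs from an SRCU callback once the notifier is fully put. */
	kfree(container_of(mn, struct my_notifier, mn));
}

static const struct mmu_notifier_ops my_ops = {
	.invalidate_range_start	= my_notifier_start,
	.invalidate_range_end	= my_notifier_end,
	.alloc_notifier		= my_alloc_notifier,
	.free_notifier		= my_free_notifier,
};
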
/*
@@ -204,6 +244,9 @@
struct mmu_notifier {
struct hlist_node hlist;
const struct mmu_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct rcu_head rcu;
+ unsigned int users;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -211,14 +254,27 @@
return unlikely(mm->mmu_notifier_mm);
}
+struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
+ struct mm_struct *mm);
+static inline struct mmu_notifier *
+mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
+{
+ struct mmu_notifier *ret;
+
+ down_write(&mm->mmap_sem);
+ ret = mmu_notifier_get_locked(ops, mm);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_synchronize(void);
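
A rough usage sketch of this lifetime interface, reusing the hypothetical
my_ops table from above:

static int my_attach_to_current_mm(void)
{
	struct mmu_notifier *mn;

	/*
	 * Returns the notifier already registered with my_ops on this mm,
	 * or calls my_ops.alloc_notifier() to create one. Takes mmap_sem.
	 */
	mn = mmu_notifier_get(&my_ops, current->mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	/* ... use the notifier ... */

	/* Drop our reference; free_notifier() runs after the last put. */
	mmu_notifier_put(mn);
	/* Optionally wait until free_notifier() can no longer be running. */
	mmu_notifier_synchronize();
	return 0;
}
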
+
extern int mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
struct mm_struct *mm);
-extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
- struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
@@ -231,15 +287,19 @@
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end,
- bool blockable);
-extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end,
+extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
-extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
+extern bool
+mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
+}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -280,33 +340,48 @@
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_start(mm, start, end, true);
+ might_sleep();
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ if (mm_has_notifiers(range->mm)) {
+ range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
+ __mmu_notifier_invalidate_range_start(range);
+ }
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- return __mmu_notifier_invalidate_range_start(mm, start, end, false);
- return 0;
+ int ret = 0;
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ if (mm_has_notifiers(range->mm)) {
+ range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
+ ret = __mmu_notifier_invalidate_range_start(range);
+ }
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ return ret;
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end, false);
+ if (mmu_notifier_range_blockable(range))
+ might_sleep();
+
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, false);
}
-static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end, true);
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, true);
}
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -327,6 +402,23 @@
__mmu_notifier_mm_destroy(mm);
}
+
+static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+ enum mmu_notifier_event event,
+ unsigned flags,
+ struct vm_area_struct *vma,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ range->vma = vma;
+ range->event = event;
+ range->mm = mm;
+ range->start = start;
+ range->end = end;
+ range->flags = flags;
+}
+
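
Putting the pieces together, a typical mm-side caller now follows an
init/start/end sequence. A minimal sketch (the clearing step stands in for
whatever page-table work the caller actually does):

static void my_clear_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
				vma->vm_mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear the page table entries for [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
}
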
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
@@ -434,12 +526,30 @@
set_pte_at(___mm, ___address, __ptep, ___pte); \
})
-extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
- void (*func)(struct rcu_head *rcu));
-extern void mmu_notifier_synchronize(void);
-
#else /* CONFIG_MMU_NOTIFIER */
+struct mmu_notifier_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
+ unsigned long start,
+ unsigned long end)
+{
+ range->start = start;
+ range->end = end;
+}
+
+#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
+ _mmu_notifier_range_init(range, start, end)
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return true;
+}
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return 0;
@@ -467,24 +577,24 @@
{
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}
-static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
return 0;
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}
@@ -493,11 +603,6 @@
{
}
-static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
-{
- return false;
-}
-
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
@@ -506,6 +611,8 @@
{
}
+#define mmu_notifier_range_update_to_read_only(r) false
+
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
@@ -515,6 +622,10 @@
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at
+static inline void mmu_notifier_synchronize(void)
+{
+}
+
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* _LINUX_MMU_NOTIFIER_H */