Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
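
The mm/userfaultfd.c hunks below, besides the mmap_sem to mmap_lock rename and the reworked memcg charging, pull in the userfaultfd write-protect (uffd-wp) plumbing: the duplicated vma checks are consolidated into a find_dst_vma() helper, UFFDIO_COPY gains a UFFDIO_COPY_MODE_WP mode (wp_copy), and the new mwriteprotect_range() backs the UFFDIO_WRITEPROTECT ioctl. As a hedged illustration of the new copy mode, here is a minimal userspace sketch (not part of the patch; uffd_copy_wp() is an illustrative name) assuming a userfaultfd already registered with UFFDIO_REGISTER_MODE_MISSING | UFFDIO_REGISTER_MODE_WP over the destination range, error handling elided:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Resolve a missing-page fault with a copy that lands write-protected:
 * with UFFDIO_COPY_MODE_WP set, mcopy_atomic_pte() takes wp_copy and
 * installs the PTE via pte_mkuffd_wp() rather than pte_mkwrite(), so
 * later writes fault back into the monitor until the WP is resolved.
 */
static int uffd_copy_wp(int uffd, void *dst, void *src, unsigned long len)
{
        struct uffdio_copy copy = {
                .dst  = (unsigned long)dst,
                .src  = (unsigned long)src,
                .len  = len,
                .mode = UFFDIO_COPY_MODE_WP,
        };

        return ioctl(uffd, UFFDIO_COPY, &copy);
}
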
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 640ff2b..9a3d451 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -18,14 +18,44 @@
#include <asm/tlbflush.h>
#include "internal.h"
+static __always_inline
+struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
+ unsigned long dst_start,
+ unsigned long len)
+{
+ /*
+ * Make sure that the dst range is both valid and fully within a
+ * single existing vma.
+ */
+ struct vm_area_struct *dst_vma;
+
+ dst_vma = find_vma(dst_mm, dst_start);
+ if (!dst_vma)
+ return NULL;
+
+ if (dst_start < dst_vma->vm_start ||
+ dst_start + len > dst_vma->vm_end)
+ return NULL;
+
+ /*
+ * Check that the vma is registered in uffd; this is required
+ * to enforce the VM_MAYWRITE check done at uffd registration
+ * time.
+ */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ return NULL;
+
+ return dst_vma;
+}
+
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep)
+ struct page **pagep,
+ bool wp_copy)
{
- struct mem_cgroup *memcg;
pte_t _dst_pte, *dst_pte;
spinlock_t *ptl;
void *page_kaddr;
@@ -46,7 +76,7 @@
PAGE_SIZE);
kunmap_atomic(page_kaddr);
- /* fallback to copy_from_user outside mmap_sem */
+ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
ret = -ENOENT;
*pagep = page;
@@ -60,18 +90,22 @@
/*
* The memory barrier inside __SetPageUptodate makes sure that
- * preceeding stores to the page contents become visible before
+ * preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
ret = -ENOMEM;
- if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
+ if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
goto out_release;
- _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
- if (dst_vma->vm_flags & VM_WRITE)
- _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+ _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
+ if (dst_vma->vm_flags & VM_WRITE) {
+ if (wp_copy)
+ _dst_pte = pte_mkuffd_wp(_dst_pte);
+ else
+ _dst_pte = pte_mkwrite(_dst_pte);
+ }
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
if (dst_vma->vm_file) {
@@ -89,8 +123,7 @@
inc_mm_counter(dst_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
- mem_cgroup_commit_charge(page, memcg, false, false);
- lru_cache_add_active_or_unevictable(page, dst_vma);
+ lru_cache_add_inactive_or_unevictable(page, dst_vma);
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -103,7 +136,6 @@
return ret;
out_release_uncharge_unlock:
pte_unmap_unlock(dst_pte, ptl);
- mem_cgroup_cancel_charge(page, memcg, false);
out_release:
put_page(page);
goto out;
@@ -168,7 +200,7 @@
#ifdef CONFIG_HUGETLB_PAGE
/*
* __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
- * called with mmap_sem held, it will release mmap_sem before returning.
+ * called with mmap_lock held; it will release mmap_lock before returning.
*/
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
@@ -184,7 +216,6 @@
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
- struct hstate *h;
unsigned long vma_hpagesize;
pgoff_t idx;
u32 hash;
@@ -197,7 +228,7 @@
* feature is not supported.
*/
if (zeropage) {
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
return -EINVAL;
}
@@ -216,25 +247,14 @@
retry:
/*
- * On routine entry dst_vma is set. If we had to drop mmap_sem and
+ * On routine entry dst_vma is set. If we had to drop mmap_lock and
* retry, dst_vma will be set to NULL and we must look it up again.
*/
if (!dst_vma) {
err = -ENOENT;
- dst_vma = find_vma(dst_mm, dst_start);
+ dst_vma = find_dst_vma(dst_mm, dst_start, len);
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
- /*
- * Check the vma is registered in uffd, this is
- * required to enforce the VM_MAYWRITE check done at
- * uffd registration time.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
- if (dst_start < dst_vma->vm_start ||
- dst_start + len > dst_vma->vm_end)
- goto out_unlock;
err = -EINVAL;
if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
@@ -243,10 +263,6 @@
vm_shared = dst_vma->vm_flags & VM_SHARED;
}
- if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
- (len - copied) & (vma_hpagesize - 1)))
- goto out_unlock;
-
/*
* If not shared, ensure the dst_vma has an anon_vma.
*/
@@ -256,26 +272,28 @@
goto out_unlock;
}
- h = hstate_vma(dst_vma);
-
while (src_addr < src_start + len) {
pte_t dst_pteval;
BUG_ON(dst_addr >= dst_start + len);
- VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
- * Serialize via hugetlb_fault_mutex
+ * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+ * i_mmap_rwsem ensures the dst_pte remains valid even
+ * in the case of shared pmds. The fault mutex prevents
+ * races with other faulting threads.
*/
- idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
- hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+ i_mmap_lock_read(mapping);
+ idx = linear_page_index(dst_vma, dst_addr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
- dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
+ dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -283,6 +301,7 @@
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -290,22 +309,24 @@
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
vm_alloc_shared = vm_shared;
cond_resched();
if (unlikely(err == -ENOENT)) {
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
BUG_ON(!page);
err = copy_huge_page_from_user(page,
(const void __user *)src_addr,
- pages_per_huge_page(h), true);
+ vma_hpagesize / PAGE_SIZE,
+ true);
if (unlikely(err)) {
err = -EFAULT;
goto out;
}
- down_read(&dst_mm->mmap_sem);
+ mmap_read_lock(dst_mm);
dst_vma = NULL;
goto retry;
@@ -325,7 +346,7 @@
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
out:
if (page) {
/*
@@ -336,7 +357,7 @@
* private and shared mappings. See the routine
* restore_reserve_on_error for details. Unfortunately, we
* can not call restore_reserve_on_error now as it would
- * require holding mmap_sem.
+ * require holding mmap_lock.
*
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
@@ -396,7 +417,8 @@
unsigned long dst_addr,
unsigned long src_addr,
struct page **page,
- bool zeropage)
+ bool zeropage,
+ bool wp_copy)
{
ssize_t err;
@@ -413,11 +435,13 @@
if (!(dst_vma->vm_flags & VM_SHARED)) {
if (!zeropage)
err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, src_addr, page);
+ dst_addr, src_addr, page,
+ wp_copy);
else
err = mfill_zeropage_pte(dst_mm, dst_pmd,
dst_vma, dst_addr);
} else {
+ VM_WARN_ON_ONCE(wp_copy);
if (!zeropage)
err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
dst_vma, dst_addr,
@@ -435,7 +459,8 @@
unsigned long src_start,
unsigned long len,
bool zeropage,
- bool *mmap_changing)
+ bool *mmap_changing,
+ __u64 mode)
{
struct vm_area_struct *dst_vma;
ssize_t err;
@@ -443,6 +468,7 @@
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
+ bool wp_copy;
/*
* Sanitize the command parameters:
@@ -459,7 +485,7 @@
copied = 0;
page = NULL;
retry:
- down_read(&dst_mm->mmap_sem);
+ mmap_read_lock(dst_mm);
/*
* If memory mappings are changing because of non-cooperative
@@ -475,20 +501,9 @@
* both valid and fully within a single existing vma.
*/
err = -ENOENT;
- dst_vma = find_vma(dst_mm, dst_start);
+ dst_vma = find_dst_vma(dst_mm, dst_start, len);
if (!dst_vma)
goto out_unlock;
- /*
- * Check the vma is registered in uffd, this is required to
- * enforce the VM_MAYWRITE check done at uffd registration
- * time.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
- if (dst_start < dst_vma->vm_start ||
- dst_start + len > dst_vma->vm_end)
- goto out_unlock;
err = -EINVAL;
/*
@@ -500,6 +515,14 @@
goto out_unlock;
/*
+ * Validate 'mode' now that we know the dst_vma: don't allow
+ * a wrprotect copy if the userfaultfd didn't register as WP.
+ */
+ wp_copy = mode & UFFDIO_COPY_MODE_WP;
+ if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
+ goto out_unlock;
+
+ /*
* If this is a HUGETLB vma, pass off to appropriate routine
*/
if (is_vm_hugetlb_page(dst_vma))
@@ -554,13 +577,13 @@
BUG_ON(pmd_trans_huge(*dst_pmd));
err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
- src_addr, &page, zeropage);
+ src_addr, &page, zeropage, wp_copy);
cond_resched();
if (unlikely(err == -ENOENT)) {
void *page_kaddr;
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
BUG_ON(!page);
page_kaddr = kmap(page);
@@ -589,7 +612,7 @@
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
out:
if (page)
put_page(page);
@@ -601,14 +624,68 @@
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
- bool *mmap_changing)
+ bool *mmap_changing, __u64 mode)
{
return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
- mmap_changing);
+ mmap_changing, mode);
}
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
unsigned long len, bool *mmap_changing)
{
- return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
+ return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
+}
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+ struct vm_area_struct *dst_vma;
+ pgprot_t newprot;
+ int err;
+
+ /*
+ * Sanitize the command parameters:
+ */
+ BUG_ON(start & ~PAGE_MASK);
+ BUG_ON(len & ~PAGE_MASK);
+
+ /* Does the address range wrap, or is the span zero-sized? */
+ BUG_ON(start + len <= start);
+
+ mmap_read_lock(dst_mm);
+
+ /*
+ * If memory mappings are changing because of non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later
+ */
+ err = -EAGAIN;
+ if (mmap_changing && READ_ONCE(*mmap_changing))
+ goto out_unlock;
+
+ err = -ENOENT;
+ dst_vma = find_dst_vma(dst_mm, start, len);
+ /*
+ * Make sure the vma is not shared and that the dst range
+ * is both valid and fully within a single existing vma.
+ */
+ if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+ goto out_unlock;
+ if (!userfaultfd_wp(dst_vma))
+ goto out_unlock;
+ if (!vma_is_anonymous(dst_vma))
+ goto out_unlock;
+
+ if (enable_wp)
+ newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+ else
+ newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+ change_protection(dst_vma, start, start + len, newprot,
+ enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+ err = 0;
+out_unlock:
+ mmap_read_unlock(dst_mm);
+ return err;
}
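
As a usage note, mwriteprotect_range() above is reached from the UFFDIO_WRITEPROTECT ioctl, with enable_wp derived from UFFDIO_WRITEPROTECT_MODE_WP. A minimal userspace sketch of the round trip (illustrative only; uffd_writeprotect() is a hypothetical wrapper, error handling elided):

#include <linux/userfaultfd.h>
#include <stdbool.h>
#include <sys/ioctl.h>

/* Toggle uffd write protection over a registered, anonymous, non-shared
 * range. enable == true maps to MM_CP_UFFD_WP in change_protection();
 * enable == false maps to MM_CP_UFFD_WP_RESOLVE, clearing the uffd-wp
 * bit so subsequent writes no longer trap.
 */
static int uffd_writeprotect(int uffd, void *start, unsigned long len,
                             bool enable)
{
        struct uffdio_writeprotect wp = {
                .range = {
                        .start = (unsigned long)start,
                        .len   = len,
                },
                .mode = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
        };

        return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}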