Update Linux to v5.4.2

Sync mm/nommu.c with upstream v5.4.2: add the SPDX license tag, switch
ksize() to the page_size() helper, drop the nommu get_user_pages*()
implementations (consolidated into mm/gup.c upstream), add
vm_map_pages()/vm_map_pages_zero() stubs, gate the MAP_UNINITIALIZED
check on CONFIG_MMAP_ALLOW_UNINITIALIZED now that the flag is defined
unconditionally, replace follow_page_mask() with follow_page(), and
take mmap_sem killably in __access_remote_vm().

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/mm/nommu.c b/mm/nommu.c
index e4aac33..99b7ec3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/nommu.c
*
@@ -107,97 +108,9 @@
* The ksize() function is only guaranteed to work for pointers
* returned by kmalloc(). So handle arbitrary pointers here.
*/
- return PAGE_SIZE << compound_order(page);
+ return page_size(page);
}
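
For context, page_size() was introduced upstream precisely to replace
this open-coded shift; a sketch of its v5.4 definition in
include/linux/mm.h:

    /* Bytes in a potentially compound page (v5.4 include/linux/mm.h). */
    static inline unsigned long page_size(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);
    }
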
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking)
-{
- struct vm_area_struct *vma;
- unsigned long vm_flags;
- int i;
-
- /* calculate required read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
- */
- vm_flags = (foll_flags & FOLL_WRITE) ?
- (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (foll_flags & FOLL_FORCE) ?
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
- for (i = 0; i < nr_pages; i++) {
- vma = find_vma(mm, start);
- if (!vma)
- goto finish_or_fault;
-
- /* protect what we can, including chardevs */
- if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- goto finish_or_fault;
-
- if (pages) {
- pages[i] = virt_to_page(start);
- if (pages[i])
- get_page(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- start = (start + PAGE_SIZE) & PAGE_MASK;
- }
-
- return i;
-
-finish_or_fault:
- return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- * slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas)
-{
- return __get_user_pages(current, current->mm, start, nr_pages,
- gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- down_read(&mm->mmap_sem);
- ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
- NULL, NULL);
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
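
Note that the get_user_pages*() family is not lost here: upstream
consolidated the nommu variants into mm/gup.c, so this file no longer
carries its own copies and existing callers build unchanged. A hedged
caller sketch (pin_one_page() and its error handling are illustrative,
not from this patch):

    #include <linux/mm.h>

    /* Illustrative only: pin a single user page for writing. */
    static int pin_one_page(unsigned long uaddr, struct page **page)
    {
            long got = get_user_pages_unlocked(uaddr, 1, page, FOLL_WRITE);

            if (got != 1)
                    return got < 0 ? got : -EFAULT;
            /* ... use the page ... */
            put_page(*page);
            return 0;
    }
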
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
@@ -473,6 +386,20 @@
}
EXPORT_SYMBOL(vm_insert_page);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vm_map_pages_zero);
+
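
vm_map_pages() and vm_map_pages_zero() are the v5.2+ helpers for
mapping a whole page array from a driver's ->mmap handler; on nommu
they are stubbed to return -EINVAL so such mmap() attempts fail
cleanly. A sketch of the MMU-side usage these stubs mirror (mydrv and
its fields are hypothetical):

    /* Hypothetical driver: map a preallocated page array to userspace. */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct mydrv *drv = file->private_data;

            /* Maps drv->pages[0..npages) at vma->vm_start on MMU
             * kernels; with this nommu stub it returns -EINVAL. */
            return vm_map_pages(vma, drv->pages, drv->npages);
    }
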
/*
* sys_brk() for the most part doesn't need the global kernel
* lock, except when an application is doing something nasty
@@ -1334,7 +1261,9 @@
add_nommu_region(region);
/* clear anonymous mappings that don't ask for uninitialized data */
- if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+ if (!vma->vm_file &&
+ (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
+ !(flags & MAP_UNINITIALIZED)))
memset((void *)region->vm_start, 0,
region->vm_end - region->vm_start);
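
The extra IS_ENABLED() test is needed because MAP_UNINITIALIZED used to
be defined as 0 unless CONFIG_MMAP_ALLOW_UNINITIALIZED was set, so the
flag check alone sufficed; once the uapi header defines the flag
unconditionally, nommu must also consult the config option.
IS_ENABLED() is a compile-time constant (1 for y/m, else 0), so the
compiler still folds the branch; the equivalent preprocessor spelling
would be:

    #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
            if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
    #else
            if (!vma->vm_file)      /* always clear: uninit data not allowed */
    #endif
                    memset((void *)region->vm_start, 0,
                           region->vm_end - region->vm_start);
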
@@ -1709,11 +1638,9 @@
return ret;
}
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned int foll_flags)
{
- *page_mask = 0;
return NULL;
}
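
Upstream dropped the page_mask out-parameter and made
follow_page_mask() internal to mm/gup.c, leaving follow_page() as the
cross-config interface; the nommu stub still just reports that no
struct page can be resolved. For reference, the v5.4 prototype:

    /* include/linux/mm.h (v5.4) */
    struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                             unsigned int foll_flags);
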
@@ -1779,7 +1706,8 @@
struct vm_area_struct *vma;
int write = gup_flags & FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem))
+ return 0;
/* the access must start within one of the target process's mappings */
vma = find_vma(mm, addr);
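
down_read_killable() returns 0 on success and -EINTR if a fatal signal
arrives while the task sleeps on the semaphore, so a process reading
another task's memory (e.g. via ptrace or /proc/<pid>/mem) can now be
killed instead of blocking uninterruptibly; __access_remote_vm() then
reports 0 bytes transferred. A hedged sketch of the pattern
(with_mm_locked() is illustrative):

    /* Illustrative: take mmap_sem killably, bail on a fatal signal. */
    static int with_mm_locked(struct mm_struct *mm)
    {
            if (down_read_killable(&mm->mmap_sem))
                    return -EINTR;  /* killed while waiting on the lock */
            /* ... inspect mm under the read lock ... */
            up_read(&mm->mmap_sem);
            return 0;
    }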