// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

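/*
 * Install a newly allocated anonymous page at dst_addr, filled with
 * PAGE_SIZE bytes copied from the userspace address src_addr.  If the
 * copy faults while the page is atomically mapped, the allocated page
 * is handed back via *pagep and -ENOENT is returned so the caller can
 * retry the copy_from_user() after dropping mmap_lock.
 */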
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}

		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
		goto out_release;

	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out_release:
	put_page(page);
	goto out;
}

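/*
 * Resolve a missing fault by mapping the shared zero page at dst_addr
 * as a read-only special pte, instead of allocating memory; this is
 * the per-pte worker for the zeropage path on private mappings.
 */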
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

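/*
 * Walk, and allocate if necessary, the page table levels down to the
 * pmd covering @address.  Returns the pmd pointer, or NULL if an
 * intermediate level could not be allocated.
 */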
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily called because the pmd is
	 * missing: the *pmd may already be established and may even be
	 * a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held, it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_lock.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */

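/*
 * Fill a single pte in the destination vma, dispatching to the
 * anonymous or shmem implementation depending on whether the vma is
 * private or shared.
 */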
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage,
						bool wp_copy)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

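/*
 * Common implementation of UFFDIO_COPY (zeropage == false) and
 * UFFDIO_ZEROPAGE (zeropage == true): fill the missing ptes of the
 * destination range one page at a time.  Returns the number of bytes
 * filled, or a negative error if nothing was filled.
 */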
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved into the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

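/* Entry point for the UFFDIO_COPY ioctl. */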
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing, mode);
}

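/* Entry point for the UFFDIO_ZEROPAGE ioctl. */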
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
}

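/*
 * Entry point for the UFFDIO_WRITEPROTECT ioctl: set or clear the
 * uffd-wp protection on an anonymous, uffd-wp registered range.
 */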
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp, bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}