// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

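/*
 * Handle a fault taken in kernel mode: try the exception fixup table
 * first, and oops if no fixup entry covers the faulting instruction.
 */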
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

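/*
 * Handle the VM_FAULT_ERROR results of handle_mm_fault(): out-of-memory
 * and SIGBUS conditions; any other error here is a bug.
 */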
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

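/*
 * The access was not covered by any valid VMA: drop the mmap lock and
 * deliver SIGSEGV in user mode, or fall back to no_context() in kernel mode.
 */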
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

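/*
 * Faults in the vmalloc/module area are resolved by copying the relevant
 * kernel page-table entries from the reference page table (init_mm.pgd)
 * into the current task's page table.
 */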
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

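/*
 * Check whether the faulting access is permitted by the protection flags
 * of the VMA it hit: execute, read, or write, depending on the trap cause.
 */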
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

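	/* Account the fault to the perf software page-fault counter. */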
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

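	/* Translate the trap cause into the matching fault flag. */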
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
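	/*
	 * find_vma() may return a VMA that starts above addr; that is
	 * only acceptable for a stack VMA that can grow down to cover it.
	 */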
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

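	/* The fault was handled (or failed terminally); drop the mmap lock. */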
	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}