/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

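/*
 * Map a single page of kernel memory read-only at the given userspace
 * address, allocating any missing page table levels along the way.
 * uml_setup_stubs() uses this to place the syscall stub code and data
 * pages into a new address space.
 */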
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}

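/*
 * Set up the mm_context for a new address space: allocate a zeroed
 * page for the stub stack, then create the process that backs the new
 * context (copying from the current context when one exists, otherwise
 * starting a fresh userspace process) and record its pid. Finally,
 * initialize the new LDT from the parent context.
 */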
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	block_signals();
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else to_mm->id.u.pid = start_userspace(stack);
	unblock_signals();

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

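/*
 * Install the stub pages into the process address space: map the stub
 * code and the stub stack page via init_stub_pte() and register the
 * [STUB_START, STUB_END) range as a special mapping. Any failure here
 * is fatal for the process, which is killed with SIGSEGV.
 */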
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

 out:
	force_sigsegv(SIGSEGV, current);
}

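/*
 * Clear the PTEs backing the stub code and data pages when the address
 * space is torn down.
 */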
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}

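/*
 * Kill the ptraced process that backs this address space, then free
 * the stub stack page and the LDT.
 */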
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be
	 * zero, resulting in a kill(0), which will result in the
	 * whole UML suddenly dying. Also, cover negative and
	 * 1 cases, since they shouldn't happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}