// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
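/*
 * These tests are normally triggered through LKDTM's debugfs interface,
 * for example (assuming CONFIG_LKDTM is enabled and debugfs is mounted
 * at /sys/kernel/debug):
 *
 *   echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 */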
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and by making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	/*
	 * "unconst" is never written to, so this always returns "stack"
	 * unchanged; the volatile read just keeps the compiler from
	 * proving that and warning about the returned local address.
	 */
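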
	return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

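/*
 * @to_user: test copy_to_user() when true, copy_from_user() when false.
 * @bad_frame: when true, the "bad" buffer is a pointer into a callee's
 * (already dead) stack frame; when false, it is a pointer that runs
 * past the end of the thread stack.
 */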
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/*
		 * Put the start address just inside the stack, so that
		 * a sizeof(good_stack) copy runs beyond its end.
		 */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

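	/* Not every architecture defines current_stack_pointer. */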
#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

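	/*
	 * On failure, vm_mmap() returns a negative errno cast to an
	 * unsigned long, which is why the error check below compares
	 * against TASK_SIZE rather than testing for zero.
	 */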
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * A write through bad_stack would spill past the end of
		 * this stack into neighboring memory (potentially another
		 * thread's stack), so if hardened usercopy failed to
		 * catch it we would do real damage. There is no safe way
		 * to exercise copy_from_user() for the !bad_frame case.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

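	/*
	 * Fill the two neighboring allocations with distinct patterns so
	 * that an over-sized copy out of "one" has recognizable contents
	 * if it strays into "two".
	 */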
	memset(one, 'A', size);
	memset(two, 'B', size);

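	/*
	 * Start the copies 16 bytes into each object so they do not
	 * begin exactly at an allocation boundary.
	 */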
	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;
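	/*
	 * The "bad" copies below start at buf + offset - 1: shifting the
	 * window down a single byte is enough to stray outside the
	 * whitelisted region and should trip hardened usercopy.
	 */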

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/*
 * Callable tests. The suffix encodes the copy direction: _TO exercises
 * copy_to_user() (kernel to user), _FROM exercises copy_from_user()
 * (user to kernel).
 */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

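/*
 * Copying out of kernel rodata is a legitimate usercopy, but copying
 * out of kernel text should be rejected by hardened usercopy's kernel
 * text check, as the messages below describe.
 */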
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: survived bad copy_to_user()\n");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
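	/*
	 * The useroffset/usersize window passed here (cache_size / 4,
	 * cache_size / 16) must match the offset/size computed in
	 * do_usercopy_heap_whitelist().
	 */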
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}