// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/sched.h>
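
/*
 * Usage sketch (assumes a typical LKDTM setup; not stated in this file):
 * the handlers below are normally reached through LKDTM's debugfs
 * interface, e.g.
 *
 *	echo WRITE_AFTER_FREE > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Each test deliberately misuses heap memory, so a kernel with the
 * relevant debugging or hardening enabled is expected to BUG or oops.
 */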

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 */
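/*
 * Worked numbers for the store below (arithmetic only; which kmalloc
 * cache actually backs the allocation depends on the slab
 * implementation): with len = 1020 and sizeof(u32) == 4,
 * data[1024 / sizeof(u32)] is data[256], i.e. byte offset 1024, a
 * 4-byte write starting 4 bytes past the end of the 1020-byte request.
 * Whether it is reported depends on redzone/KASAN-style checking.
 */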
void lkdtm_OVERWRITE_ALLOCATION(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}

void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
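	/*
	 * Worked numbers (derived from the values above): with len = 1024
	 * and sizeof(*base) == 4 on the usual architectures, offset is
	 * (1024 / 4) / 2 == 128, i.e. byte offset 512, well away from
	 * both ends of the object. The same calculation is repeated in
	 * lkdtm_READ_AFTER_FREE() below.
	 */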
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}

void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);
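
	/*
	 * What the read below returns is configuration dependent (an
	 * expectation, not something this code enforces): with slab
	 * poisoning the freed object is filled with the POISON_FREE
	 * pattern (0x6b bytes), so "saw" should differ from *val; with
	 * KASAN the use-after-free read itself should be reported.
	 */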
	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
		BUG();
	}
	pr_info("Memory was not poisoned\n");

	kfree(val);
}

void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
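	/*
	 * Likely intent (an inference): schedule() gives the rest of the
	 * system a chance to reuse or check the just-freed page before
	 * the bad write below.
	 */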
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}

void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
		BUG();
	}
	pr_info("Buddy page was not poisoned\n");

	kfree(val);
}
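
/*
 * Expected detection for the double free below (an assumption about the
 * kernel configuration, not enforced here): hardened freelist handling
 * such as CONFIG_SLAB_FREELIST_HARDENED should reject the second
 * kmem_cache_free() of the same object; without it the corruption may
 * only surface later.
 */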
void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}
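
/*
 * The free below hands an object allocated from a_cache to b_cache.
 * Whether this is caught depends on the slab implementation and its
 * debug/hardening options (an expectation, not a guarantee): some
 * configurations verify that an object belongs to the cache it is
 * freed to, others silently corrupt the wrong freelist.
 */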
void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}
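
/*
 * In the test below the "object" is a whole page from the page
 * allocator and the cache argument is NULL, so a robust
 * kmem_cache_free() path has two anomalies it could notice: a NULL
 * cache and a non-slab page.
 */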
void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
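/*
 * Why empty constructors are enough (an assumption about the slab
 * allocators' merging rules): caches that have a constructor are
 * treated as unmergeable, so each test gets its own cache even when
 * slab merging is otherwise enabled.
 */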
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}