// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test module for unwind_for_each_frame
 */

#define pr_fmt(fmt) "test_unwind: " fmt
#include <asm/unwind.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kprobes.h>
#include <linux/wait.h>
#include <asm/irq.h>
#include <asm/delay.h>

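/* Size of the buffer used to collect the formatted backtrace. */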
#define BT_BUF_SIZE (PAGE_SIZE * 4)

/*
 * Split the backtrace into individual lines to stay within the printk()
 * line length limit.
 */
static void print_backtrace(char *bt)
{
	char *p;

	while (true) {
		p = strsep(&bt, "\n");
		if (!p)
			break;
		pr_err("%s\n", p);
	}
}

/*
 * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result
 * contains unwindme_func2 followed by unwindme_func1.
 */
static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
				unsigned long sp)
{
	int frame_count, prev_is_func2, seen_func2_func1;
	const int max_frames = 128;
	struct unwind_state state;
	size_t bt_pos = 0;
	int ret = 0;
	char *bt;

	bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
	if (!bt) {
		pr_err("failed to allocate backtrace buffer\n");
		return -ENOMEM;
	}
	/* Unwind. */
	frame_count = 0;
	prev_is_func2 = 0;
	seen_func2_func1 = 0;
	unwind_for_each_frame(&state, task, regs, sp) {
		unsigned long addr = unwind_get_return_address(&state);
		char sym[KSYM_SYMBOL_LEN];

		if (frame_count++ == max_frames)
			break;
		if (state.reliable && !addr) {
			pr_err("unwind state reliable but addr is 0\n");
			kfree(bt);
			return -EINVAL;
		}
		sprint_symbol(sym, addr);
		if (bt_pos < BT_BUF_SIZE) {
			bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos,
					   state.reliable ? " [%-7s%px] %pSR\n" :
							    "([%-7s%px] %pSR)\n",
					   stack_type_name(state.stack_info.type),
					   (void *)state.sp, (void *)state.ip);
			if (bt_pos >= BT_BUF_SIZE)
				pr_err("backtrace buffer is too small\n");
		}
		if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
			seen_func2_func1 = 1;
		prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
	}

	/* Check the results. */
	if (unwind_error(&state)) {
		pr_err("unwind error\n");
		ret = -EINVAL;
	}
	if (!seen_func2_func1) {
		pr_err("unwindme_func2 and unwindme_func1 not found\n");
		ret = -EINVAL;
	}
	if (frame_count > max_frames) {
		pr_err("Maximum number of frames exceeded\n");
		ret = -EINVAL;
	}
	if (ret)
		print_backtrace(bt);
	kfree(bt);
	return ret;
}

/* State of the task being unwound. */
struct unwindme {
	int flags;
	int ret;
	struct task_struct *task;
	struct completion task_ready;
	wait_queue_head_t task_wq;
	unsigned long sp;
};

static struct unwindme *unwindme;

/* Values of unwindme.flags. */
#define UWM_DEFAULT		0x0
#define UWM_THREAD		0x1	/* Unwind a separate task. */
#define UWM_REGS		0x2	/* Pass regs to test_unwind(). */
#define UWM_SP			0x4	/* Pass sp to test_unwind(). */
#define UWM_CALLER		0x8	/* Unwind starting from caller. */
#define UWM_SWITCH_STACK	0x10	/* Use CALL_ON_STACK. */
#define UWM_IRQ			0x20	/* Unwind from irq context. */
#define UWM_PGM			0x40	/* Unwind from program check handler. */

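/*
 * A basr with 0 as the second operand does not branch: it only stores the
 * address of the next sequential instruction, i.e. the current PSW address,
 * in the first operand register.
 */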
static __always_inline unsigned long get_psw_addr(void)
{
	unsigned long psw_addr;

	asm volatile(
		"basr	%[psw_addr],0\n"
		: [psw_addr] "=d" (psw_addr));
	return psw_addr;
}

#ifdef CONFIG_KPROBES
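/* Kprobe pre-handler: performs the unwind from program check context. */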
static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct unwindme *u = unwindme;

	u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
			     (u->flags & UWM_SP) ? u->sp : 0);
	return 0;
}
#endif

/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func4(struct unwindme *u)
{
	if (!(u->flags & UWM_CALLER))
		u->sp = current_frame_address();
	if (u->flags & UWM_THREAD) {
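		/* Tell the parent we are ready and sleep until it parks us. */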
		complete(&u->task_ready);
		wait_event(u->task_wq, kthread_should_park());
		kthread_parkme();
		return 0;
#ifdef CONFIG_KPROBES
	} else if (u->flags & UWM_PGM) {
		struct kprobe kp;
		int ret;

		unwindme = u;
		memset(&kp, 0, sizeof(kp));
		kp.symbol_name = "do_report_trap";
		kp.pre_handler = pgm_pre_handler;
		ret = register_kprobe(&kp);
		if (ret < 0) {
			pr_err("register_kprobe failed %d\n", ret);
			return -EINVAL;
		}

		/*
		 * Trigger operation exception; use insn notation to bypass
		 * llvm's integrated assembler sanity checks.
		 */
		asm volatile(
			"	.insn	e,0x0000\n"	/* illegal opcode */
			"0:	nopr	%%r7\n"
			EX_TABLE(0b, 0b)
			:);

		unregister_kprobe(&kp);
		unwindme = NULL;
		return u->ret;
#endif
	} else {
		struct pt_regs regs;

		memset(&regs, 0, sizeof(regs));
		regs.psw.addr = get_psw_addr();
		regs.gprs[15] = current_stack_pointer();
		return test_unwind(NULL,
				   (u->flags & UWM_REGS) ? &regs : NULL,
				   (u->flags & UWM_SP) ? u->sp : 0);
	}
}

/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func3(struct unwindme *u)
{
	u->sp = current_frame_address();
	return unwindme_func4(u);
}

/* This function must appear in the backtrace. */
static noinline int unwindme_func2(struct unwindme *u)
{
	unsigned long flags;
	int rc;

	if (u->flags & UWM_SWITCH_STACK) {
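		/*
		 * Run unwindme_func3() on the per-CPU nodat stack, with
		 * interrupts and machine checks disabled while we are off
		 * the task's normal stack.
		 */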
		local_irq_save(flags);
		local_mcck_disable();
		rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u);
		local_mcck_enable();
		local_irq_restore(flags);
		return rc;
	} else {
		return unwindme_func3(u);
	}
}

/* This function must follow unwindme_func2 in the backtrace. */
static noinline int unwindme_func1(void *u)
{
	return unwindme_func2((struct unwindme *)u);
}

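/*
 * External interrupt handler used by the UWM_IRQ tests: if the interrupt
 * hits the task that armed the test, run the unwind from IRQ context.
 */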
static void unwindme_irq_handler(struct ext_code ext_code,
				 unsigned int param32,
				 unsigned long param64)
{
	struct unwindme *u = READ_ONCE(unwindme);

	if (u && u->task == current) {
		unwindme = NULL;
		u->task = NULL;
		u->ret = unwindme_func1(u);
	}
}

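/*
 * Register a temporary handler for the clock comparator external interrupt;
 * the udelay() below gives that interrupt a chance to fire on this CPU so
 * that the unwind runs from IRQ context.
 */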
static int test_unwind_irq(struct unwindme *u)
{
	preempt_disable();
	if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
		pr_info("Couldn't register external interrupt handler\n");
		preempt_enable();
		return -1;
	}
	u->task = current;
	unwindme = u;
	udelay(1);
	unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler);
	preempt_enable();
	return u->ret;
}

/* Spawns a task and passes it to test_unwind(). */
static int test_unwind_task(struct unwindme *u)
{
	struct task_struct *task;
	int ret;

	/* Initialize thread-related fields. */
	init_completion(&u->task_ready);
	init_waitqueue_head(&u->task_wq);

	/*
	 * Start the task and wait until it reaches unwindme_func4() and
	 * sleeps on task_wq, waiting to be parked.
	 */
	task = kthread_run(unwindme_func1, u, "%s", __func__);
	if (IS_ERR(task)) {
		pr_err("kthread_run() failed\n");
		return PTR_ERR(task);
	}
	/*
	 * Make sure the task reaches unwindme_func4() before parking it;
	 * otherwise it might be parked before the kthread function has run.
	 */
	wait_for_completion(&u->task_ready);
	kthread_park(task);
	/* Unwind. */
	ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0);
	kthread_stop(task);
	return ret;
}

static int test_unwind_flags(int flags)
{
	struct unwindme u;

	u.flags = flags;
	if (u.flags & UWM_THREAD)
		return test_unwind_task(&u);
	else if (u.flags & UWM_IRQ)
		return test_unwind_irq(&u);
	else
		return unwindme_func1(&u);
}

static int test_unwind_init(void)
{
	int ret = 0;

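/* Run test_unwind_flags() for one flag combination and log the result. */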
#define TEST(flags)						\
do {								\
	pr_info("[ RUN ] " #flags "\n");			\
	if (!test_unwind_flags((flags))) {			\
		pr_info("[ OK ] " #flags "\n");			\
	} else {						\
		pr_err("[ FAILED ] " #flags "\n");		\
		ret = -EINVAL;					\
	}							\
} while (0)

	TEST(UWM_DEFAULT);
	TEST(UWM_SP);
	TEST(UWM_REGS);
	TEST(UWM_SWITCH_STACK);
	TEST(UWM_SP | UWM_REGS);
	TEST(UWM_CALLER | UWM_SP);
	TEST(UWM_CALLER | UWM_SP | UWM_REGS);
	TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
	TEST(UWM_THREAD);
	TEST(UWM_THREAD | UWM_SP);
	TEST(UWM_THREAD | UWM_CALLER | UWM_SP);
	TEST(UWM_IRQ);
	TEST(UWM_IRQ | UWM_SWITCH_STACK);
	TEST(UWM_IRQ | UWM_SP);
	TEST(UWM_IRQ | UWM_REGS);
	TEST(UWM_IRQ | UWM_SP | UWM_REGS);
	TEST(UWM_IRQ | UWM_CALLER | UWM_SP);
	TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS);
	TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
#ifdef CONFIG_KPROBES
	TEST(UWM_PGM);
	TEST(UWM_PGM | UWM_SP);
	TEST(UWM_PGM | UWM_REGS);
	TEST(UWM_PGM | UWM_SP | UWM_REGS);
#endif
#undef TEST

	return ret;
}

static void test_unwind_exit(void)
{
}

module_init(test_unwind_init);
module_exit(test_unwind_exit);
MODULE_LICENSE("GPL");
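/*
 * This module is normally built as one of the s390 unwinder selftests (the
 * usual Kconfig switch for this file is CONFIG_S390_UNWIND_SELFTEST, noted
 * here as an assumption); load it and check the kernel log for the
 * "[ OK ]" / "[ FAILED ]" lines printed above.
 */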