Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* |
| 2 | * User-space Probes (UProbes) for sparc |
| 3 | * |
| 4 | * Copyright (C) 2013 Oracle Inc. |
| 5 | * |
| 6 | * This program is free software: you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation, either version 2 of the License, or |
| 9 | * (at your option) any later version. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | * |
| 19 | * Authors: |
| 20 | * Jose E. Marchesi <jose.marchesi@oracle.com> |
| 21 | * Eric Saint Etienne <eric.saint.etienne@oracle.com> |
| 22 | */ |
| 23 | |
| 24 | #include <linux/kernel.h> |
| 25 | #include <linux/highmem.h> |
| 26 | #include <linux/uprobes.h> |
| 27 | #include <linux/uaccess.h> |
| 28 | #include <linux/sched.h> /* For struct task_struct */ |
| 29 | #include <linux/kdebug.h> |
| 30 | |
| 31 | #include <asm/cacheflush.h> |
| 32 | #include <linux/uaccess.h> |
| 33 | |
| 34 | /* Compute the address of the breakpoint instruction and return it. |
| 35 | * |
| 36 | * Note that uprobe_get_swbp_addr is defined as a weak symbol in |
| 37 | * kernel/events/uprobe.c. |
| 38 | */ |
/* Report the address at which the breakpoint trap occurred.
 *
 * Overrides the weak default in kernel/events/uprobes.c; on sparc the
 * trapping PC itself is the breakpoint address.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	unsigned long bp_vaddr = instruction_pointer(regs);

	return bp_vaddr;
}
| 43 | |
| 44 | static void copy_to_page(struct page *page, unsigned long vaddr, |
| 45 | const void *src, int len) |
| 46 | { |
| 47 | void *kaddr = kmap_atomic(page); |
| 48 | |
| 49 | memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); |
| 50 | kunmap_atomic(kaddr); |
| 51 | } |
| 52 | |
| 53 | /* Fill in the xol area with the probed instruction followed by the |
| 54 | * single-step trap. Some fixups in the copied instruction are |
| 55 | * performed at this point. |
| 56 | * |
| 57 | * Note that uprobe_xol_copy is defined as a weak symbol in |
| 58 | * kernel/events/uprobe.c. |
| 59 | */ |
| 60 | void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, |
| 61 | void *src, unsigned long len) |
| 62 | { |
| 63 | const u32 stp_insn = UPROBE_STP_INSN; |
| 64 | u32 insn = *(u32 *) src; |
| 65 | |
| 66 | /* Branches annulling their delay slot must be fixed to not do |
| 67 | * so. Clearing the annul bit on these instructions we can be |
| 68 | * sure the single-step breakpoint in the XOL slot will be |
| 69 | * executed. |
| 70 | */ |
| 71 | |
| 72 | u32 op = (insn >> 30) & 0x3; |
| 73 | u32 op2 = (insn >> 22) & 0x7; |
| 74 | |
| 75 | if (op == 0 && |
| 76 | (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) && |
| 77 | (insn & ANNUL_BIT) == ANNUL_BIT) |
| 78 | insn &= ~ANNUL_BIT; |
| 79 | |
| 80 | copy_to_page(page, vaddr, &insn, len); |
| 81 | copy_to_page(page, vaddr+len, &stp_insn, 4); |
| 82 | } |
| 83 | |
| 84 | |
| 85 | /* Instruction analysis/validity. |
| 86 | * |
| 87 | * This function returns 0 on success or a -ve number on error. |
| 88 | */ |
/* Instruction analysis/validity.
 *
 * Currently every instruction is accepted: no opcode is rejected as
 * unprobeable. Return 0 for success; a future version would return a
 * negative errno (e.g. -EINVAL) for unsupported instructions.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
			     struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
| 95 | |
| 96 | /* If INSN is a relative control transfer instruction, return the |
| 97 | * corrected branch destination value. |
| 98 | * |
| 99 | * Note that regs->tpc and regs->tnpc still hold the values of the |
| 100 | * program counters at the time of the single-step trap due to the |
| 101 | * execution of the UPROBE_STP_INSN at utask->xol_vaddr + 4. |
| 102 | * |
| 103 | */ |
| 104 | static unsigned long relbranch_fixup(u32 insn, struct uprobe_task *utask, |
| 105 | struct pt_regs *regs) |
| 106 | { |
| 107 | /* Branch not taken, no mods necessary. */ |
| 108 | if (regs->tnpc == regs->tpc + 0x4UL) |
| 109 | return utask->autask.saved_tnpc + 0x4UL; |
| 110 | |
| 111 | /* The three cases are call, branch w/prediction, |
| 112 | * and traditional branch. |
| 113 | */ |
| 114 | if ((insn & 0xc0000000) == 0x40000000 || |
| 115 | (insn & 0xc1c00000) == 0x00400000 || |
| 116 | (insn & 0xc1c00000) == 0x00800000) { |
| 117 | unsigned long real_pc = (unsigned long) utask->vaddr; |
| 118 | unsigned long ixol_addr = utask->xol_vaddr; |
| 119 | |
| 120 | /* The instruction did all the work for us |
| 121 | * already, just apply the offset to the correct |
| 122 | * instruction location. |
| 123 | */ |
| 124 | return (real_pc + (regs->tnpc - ixol_addr)); |
| 125 | } |
| 126 | |
| 127 | /* It is jmpl or some other absolute PC modification instruction, |
| 128 | * leave NPC as-is. |
| 129 | */ |
| 130 | return regs->tnpc; |
| 131 | } |
| 132 | |
| 133 | /* If INSN is an instruction which writes its PC location |
| 134 | * into a destination register, fix that up. |
| 135 | */ |
| 136 | static int retpc_fixup(struct pt_regs *regs, u32 insn, |
| 137 | unsigned long real_pc) |
| 138 | { |
| 139 | unsigned long *slot = NULL; |
| 140 | int rc = 0; |
| 141 | |
| 142 | /* Simplest case is 'call', which always uses %o7 */ |
| 143 | if ((insn & 0xc0000000) == 0x40000000) |
| 144 | slot = ®s->u_regs[UREG_I7]; |
| 145 | |
| 146 | /* 'jmpl' encodes the register inside of the opcode */ |
| 147 | if ((insn & 0xc1f80000) == 0x81c00000) { |
| 148 | unsigned long rd = ((insn >> 25) & 0x1f); |
| 149 | |
| 150 | if (rd <= 15) { |
| 151 | slot = ®s->u_regs[rd]; |
| 152 | } else { |
| 153 | unsigned long fp = regs->u_regs[UREG_FP]; |
| 154 | /* Hard case, it goes onto the stack. */ |
| 155 | flushw_all(); |
| 156 | |
| 157 | rd -= 16; |
| 158 | if (test_thread_64bit_stack(fp)) { |
| 159 | unsigned long __user *uslot = |
| 160 | (unsigned long __user *) (fp + STACK_BIAS) + rd; |
| 161 | rc = __put_user(real_pc, uslot); |
| 162 | } else { |
| 163 | unsigned int __user *uslot = (unsigned int |
| 164 | __user *) fp + rd; |
| 165 | rc = __put_user((u32) real_pc, uslot); |
| 166 | } |
| 167 | } |
| 168 | } |
| 169 | if (slot != NULL) |
| 170 | *slot = real_pc; |
| 171 | return rc; |
| 172 | } |
| 173 | |
| 174 | /* Single-stepping can be avoided for certain instructions: NOPs and |
| 175 | * instructions that can be emulated. This function determines |
| 176 | * whether the instruction where the uprobe is installed falls in one |
| 177 | * of these cases and emulates it. |
| 178 | * |
| 179 | * This function returns true if the single-stepping can be skipped, |
| 180 | * false otherwise. |
| 181 | */ |
| 182 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) |
| 183 | { |
| 184 | /* We currently only emulate NOP instructions. |
| 185 | */ |
| 186 | |
| 187 | if (auprobe->ixol == (1 << 24)) { |
| 188 | regs->tnpc += 4; |
| 189 | regs->tpc += 4; |
| 190 | return true; |
| 191 | } |
| 192 | |
| 193 | return false; |
| 194 | } |
| 195 | |
| 196 | /* Prepare to execute out of line. At this point |
| 197 | * current->utask->xol_vaddr points to an allocated XOL slot properly |
| 198 | * initialized with the original instruction and the single-stepping |
| 199 | * trap instruction. |
| 200 | * |
| 201 | * This function returns 0 on success, any other number on error. |
| 202 | */ |
| 203 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) |
| 204 | { |
| 205 | struct uprobe_task *utask = current->utask; |
| 206 | struct arch_uprobe_task *autask = ¤t->utask->autask; |
| 207 | |
| 208 | /* Save the current program counters so they can be restored |
| 209 | * later. |
| 210 | */ |
| 211 | autask->saved_tpc = regs->tpc; |
| 212 | autask->saved_tnpc = regs->tnpc; |
| 213 | |
| 214 | /* Adjust PC and NPC so the first instruction in the XOL slot |
| 215 | * will be executed by the user task. |
| 216 | */ |
| 217 | instruction_pointer_set(regs, utask->xol_vaddr); |
| 218 | |
| 219 | return 0; |
| 220 | } |
| 221 | |
| 222 | /* Prepare to resume execution after the single-step. Called after |
| 223 | * single-stepping. To avoid the SMP problems that can occur when we |
| 224 | * temporarily put back the original opcode to single-step, we |
| 225 | * single-stepped a copy of the instruction. |
| 226 | * |
| 227 | * This function returns 0 on success, any other number on error. |
| 228 | */ |
| 229 | int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) |
| 230 | { |
| 231 | struct uprobe_task *utask = current->utask; |
| 232 | struct arch_uprobe_task *autask = &utask->autask; |
| 233 | u32 insn = auprobe->ixol; |
| 234 | int rc = 0; |
| 235 | |
| 236 | if (utask->state == UTASK_SSTEP_ACK) { |
| 237 | regs->tnpc = relbranch_fixup(insn, utask, regs); |
| 238 | regs->tpc = autask->saved_tnpc; |
| 239 | rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr); |
| 240 | } else { |
| 241 | regs->tnpc = utask->vaddr+4; |
| 242 | regs->tpc = autask->saved_tnpc+4; |
| 243 | } |
| 244 | return rc; |
| 245 | } |
| 246 | |
| 247 | /* Handler for uprobe traps. This is called from the traps table and |
| 248 | * triggers the proper die notification. |
| 249 | */ |
| 250 | asmlinkage void uprobe_trap(struct pt_regs *regs, |
| 251 | unsigned long trap_level) |
| 252 | { |
| 253 | BUG_ON(trap_level != 0x173 && trap_level != 0x174); |
| 254 | |
| 255 | /* We are only interested in user-mode code. Uprobe traps |
| 256 | * shall not be present in kernel code. |
| 257 | */ |
| 258 | if (!user_mode(regs)) { |
| 259 | local_irq_enable(); |
| 260 | bad_trap(regs, trap_level); |
| 261 | return; |
| 262 | } |
| 263 | |
| 264 | /* trap_level == 0x173 --> ta 0x73 |
| 265 | * trap_level == 0x174 --> ta 0x74 |
| 266 | */ |
| 267 | if (notify_die((trap_level == 0x173) ? DIE_BPT : DIE_SSTEP, |
| 268 | (trap_level == 0x173) ? "bpt" : "sstep", |
| 269 | regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) |
| 270 | bad_trap(regs, trap_level); |
| 271 | } |
| 272 | |
| 273 | /* Callback routine for handling die notifications. |
| 274 | */ |
| 275 | int arch_uprobe_exception_notify(struct notifier_block *self, |
| 276 | unsigned long val, void *data) |
| 277 | { |
| 278 | int ret = NOTIFY_DONE; |
| 279 | struct die_args *args = (struct die_args *)data; |
| 280 | |
| 281 | /* We are only interested in userspace traps */ |
| 282 | if (args->regs && !user_mode(args->regs)) |
| 283 | return NOTIFY_DONE; |
| 284 | |
| 285 | switch (val) { |
| 286 | case DIE_BPT: |
| 287 | if (uprobe_pre_sstep_notifier(args->regs)) |
| 288 | ret = NOTIFY_STOP; |
| 289 | break; |
| 290 | |
| 291 | case DIE_SSTEP: |
| 292 | if (uprobe_post_sstep_notifier(args->regs)) |
| 293 | ret = NOTIFY_STOP; |
| 294 | |
| 295 | default: |
| 296 | break; |
| 297 | } |
| 298 | |
| 299 | return ret; |
| 300 | } |
| 301 | |
| 302 | /* This function gets called when a XOL instruction either gets |
| 303 | * trapped or the thread has a fatal signal, so reset the instruction |
| 304 | * pointer to its probed address. |
| 305 | */ |
| 306 | void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) |
| 307 | { |
| 308 | struct uprobe_task *utask = current->utask; |
| 309 | |
| 310 | instruction_pointer_set(regs, utask->vaddr); |
| 311 | } |
| 312 | |
| 313 | /* If xol insn itself traps and generates a signal(Say, |
| 314 | * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped |
| 315 | * instruction jumps back to its own address. |
| 316 | */ |
| 317 | bool arch_uprobe_xol_was_trapped(struct task_struct *t) |
| 318 | { |
| 319 | return false; |
| 320 | } |
| 321 | |
| 322 | unsigned long |
| 323 | arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, |
| 324 | struct pt_regs *regs) |
| 325 | { |
| 326 | unsigned long orig_ret_vaddr = regs->u_regs[UREG_I7]; |
| 327 | |
| 328 | regs->u_regs[UREG_I7] = trampoline_vaddr-8; |
| 329 | |
| 330 | return orig_ret_vaddr + 8; |
| 331 | } |