David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4 | */ |
| 5 | |
| 6 | /* |
| 7 | * This file reads all the special sections which have alternate instructions |
| 8 | * which can be patched in or redirected to at runtime. |
| 9 | */ |
| 10 | |
| 11 | #include <stdlib.h> |
| 12 | #include <string.h> |
| 13 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 14 | #include "builtin.h" |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 15 | #include "special.h" |
| 16 | #include "warn.h" |
| 17 | |
| 18 | #define EX_ENTRY_SIZE 12 |
| 19 | #define EX_ORIG_OFFSET 0 |
| 20 | #define EX_NEW_OFFSET 4 |
| 21 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 22 | #define JUMP_ENTRY_SIZE 16 |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 23 | #define JUMP_ORIG_OFFSET 0 |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 24 | #define JUMP_NEW_OFFSET 4 |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 25 | |
| 26 | #define ALT_ENTRY_SIZE 13 |
| 27 | #define ALT_ORIG_OFFSET 0 |
| 28 | #define ALT_NEW_OFFSET 4 |
| 29 | #define ALT_FEATURE_OFFSET 8 |
| 30 | #define ALT_ORIG_LEN_OFFSET 10 |
| 31 | #define ALT_NEW_LEN_OFFSET 11 |
| 32 | |
| 33 | #define X86_FEATURE_POPCNT (4*32+23) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 34 | #define X86_FEATURE_SMAP (9*32+20) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 35 | |
/*
 * Describes the fixed binary layout of one kind of special-section entry
 * (alternatives, jump labels, exception table). All offsets are in bytes
 * from the start of an individual entry within the section.
 */
struct special_entry {
	const char *sec;		/* ELF section name holding the entries */
	bool group, jump_or_nop;	/* group: alternative group; jump_or_nop: jump label */
	unsigned char size, orig, new;	/* entry size; offsets of orig/new insn relas */
	unsigned char orig_len, new_len; /* group only */
	unsigned char feature;		/* ALTERNATIVE macro CPU feature */
};
| 43 | |
/*
 * Table of all special sections we know how to parse, terminated by an
 * empty sentinel entry. The offsets must match the layouts produced by
 * the kernel's ALTERNATIVE, jump-label and _ASM_EXTABLE macros.
 */
struct special_entry entries[] = {
	{
		.sec = ".altinstructions",
		.group = true,
		.size = ALT_ENTRY_SIZE,
		.orig = ALT_ORIG_OFFSET,
		.orig_len = ALT_ORIG_LEN_OFFSET,
		.new = ALT_NEW_OFFSET,
		.new_len = ALT_NEW_LEN_OFFSET,
		.feature = ALT_FEATURE_OFFSET,
	},
	{
		.sec = "__jump_table",
		.jump_or_nop = true,
		.size = JUMP_ENTRY_SIZE,
		.orig = JUMP_ORIG_OFFSET,
		.new = JUMP_NEW_OFFSET,
	},
	{
		.sec = "__ex_table",
		.size = EX_ENTRY_SIZE,
		.orig = EX_ORIG_OFFSET,
		.new = EX_NEW_OFFSET,
	},
	{},
};
| 70 | |
/*
 * Parse a single entry (index @idx) from special section @sec, described by
 * layout @entry, into @alt.
 *
 * Reads the fixed-size fields straight out of the section's data buffer and
 * resolves the "orig" and "new" instruction locations via their relocations.
 *
 * Returns 0 on success, -1 on a malformed entry (missing rela or a rela
 * against a non-section symbol).
 */
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
			 struct section *sec, int idx,
			 struct special_alt *alt)
{
	struct rela *orig_rela, *new_rela;
	unsigned long offset;

	/* Byte offset of this entry within the section's data. */
	offset = idx * entry->size;

	alt->group = entry->group;
	alt->jump_or_nop = entry->jump_or_nop;

	if (alt->group) {
		/* Alternative groups carry explicit orig/new insn lengths. */
		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
						   entry->orig_len);
		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
						  entry->new_len);
	}

	if (entry->feature) {
		unsigned short feature;

		feature = *(unsigned short *)(sec->data->d_buf + offset +
					      entry->feature);

		/*
		 * It has been requested that we don't validate the !POPCNT
		 * feature path which is a "very very small percentage of
		 * machines".
		 */
		if (feature == X86_FEATURE_POPCNT)
			alt->skip_orig = true;

		/*
		 * If UACCESS validation is enabled; force that alternative;
		 * otherwise force it the other way.
		 *
		 * What we want to avoid is having both the original and the
		 * alternative code flow at the same time, in that case we can
		 * find paths that see the STAC but take the NOP instead of
		 * CLAC and the other way around.
		 */
		if (feature == X86_FEATURE_SMAP) {
			if (uaccess)
				alt->skip_orig = true;
			else
				alt->skip_alt = true;
		}
	}

	/* The "orig" field is a relocated pointer to the patched insn site. */
	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
	if (!orig_rela) {
		WARN_FUNC("can't find orig rela", sec, offset + entry->orig);
		return -1;
	}
	if (orig_rela->sym->type != STT_SECTION) {
		WARN_FUNC("don't know how to handle non-section rela symbol %s",
			   sec, offset + entry->orig, orig_rela->sym->name);
		return -1;
	}

	alt->orig_sec = orig_rela->sym->sec;
	alt->orig_off = orig_rela->addend;

	/*
	 * A group entry with new_len == 0 is an "ALTERNATIVE to nothing"
	 * (NOP out the original); it legitimately has no "new" rela.
	 */
	if (!entry->group || alt->new_len) {
		new_rela = find_rela_by_dest(sec, offset + entry->new);
		if (!new_rela) {
			WARN_FUNC("can't find new rela",
				  sec, offset + entry->new);
			return -1;
		}

		alt->new_sec = new_rela->sym->sec;
		alt->new_off = (unsigned int)new_rela->addend;

		/* _ASM_EXTABLE_EX hack */
		if (alt->new_off >= 0x7ffffff0)
			alt->new_off -= 0x7ffffff0;
	}

	return 0;
}
| 153 | |
| 154 | /* |
| 155 | * Read all the special sections and create a list of special_alt structs which |
| 156 | * describe all the alternate instructions which can be patched in or |
| 157 | * redirected to at runtime. |
| 158 | */ |
| 159 | int special_get_alts(struct elf *elf, struct list_head *alts) |
| 160 | { |
| 161 | struct special_entry *entry; |
| 162 | struct section *sec; |
| 163 | unsigned int nr_entries; |
| 164 | struct special_alt *alt; |
| 165 | int idx, ret; |
| 166 | |
| 167 | INIT_LIST_HEAD(alts); |
| 168 | |
| 169 | for (entry = entries; entry->sec; entry++) { |
| 170 | sec = find_section_by_name(elf, entry->sec); |
| 171 | if (!sec) |
| 172 | continue; |
| 173 | |
| 174 | if (sec->len % entry->size != 0) { |
| 175 | WARN("%s size not a multiple of %d", |
| 176 | sec->name, entry->size); |
| 177 | return -1; |
| 178 | } |
| 179 | |
| 180 | nr_entries = sec->len / entry->size; |
| 181 | |
| 182 | for (idx = 0; idx < nr_entries; idx++) { |
| 183 | alt = malloc(sizeof(*alt)); |
| 184 | if (!alt) { |
| 185 | WARN("malloc failed"); |
| 186 | return -1; |
| 187 | } |
| 188 | memset(alt, 0, sizeof(*alt)); |
| 189 | |
| 190 | ret = get_alt_entry(elf, entry, sec, idx, alt); |
| 191 | if (ret) |
| 192 | return ret; |
| 193 | |
| 194 | list_add_tail(&alt->list, alts); |
| 195 | } |
| 196 | } |
| 197 | |
| 198 | return 0; |
| 199 | } |