/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
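/*
 * For example (illustrative, not part of this header), a file that must not
 * be branch-traced can opt out by defining the macro before any includes:
 *
 *	#define DISABLE_BRANCH_PROFILING
 *	#include <linux/compiler.h>
 */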
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
	long ______r;							\
	static struct ftrace_likely_data				\
		__attribute__((__aligned__(4)))				\
		__attribute__((section("_ftrace_annotated_branch")))	\
		______f = {						\
			.data.func = __func__,				\
			.data.file = __FILE__,				\
			.data.line = __LINE__,				\
		};							\
	______r = __builtin_expect(!!(x), expect);			\
	ftrace_likely_update(&______f, ______r,				\
			     expect, is_constant);			\
	______r;							\
})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond)						\
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
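/*
 * A sketch of the effect (illustrative only): with CONFIG_PROFILE_ALL_BRANCHES,
 * an ordinary kernel statement such as
 *
 *	if (x > 0)
 *		do_thing();
 *
 * goes through __trace_if(), so each evaluation of the non-constant condition
 * bumps ______f.miss_hit[0] when it is false or ______f.miss_hit[1] when it is
 * true, and the branch profiler later reports the counts per call site.
 */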

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
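/*
 * Example usage (illustrative caller code; 'err', 'buf' and process() are
 * hypothetical):
 *
 *	if (unlikely(err))	(rare error path)
 *		return err;
 *	if (likely(buf))	(common case, laid out first by the compiler)
 *		process(buf);
 */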

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
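/*
 * Example (illustrative; 'flag' is a hypothetical variable an interrupt
 * handler may change): barrier() forces the compiler to re-read memory
 * instead of caching the value in a register across iterations:
 *
 *	while (!flag)
 *		barrier();
 *
 * barrier_data(ptr) is used similarly, e.g. after clearing sensitive data,
 * so the compiler cannot prove the buffer is dead and elide the clearing.
 */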

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
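/*
 * Example (illustrative; my_trap() is a hypothetical helper that never
 * returns, but whose noreturn property the compiler cannot see): placing
 * unreachable() after the call suppresses bogus fall-through code and
 * "control reaches end of non-void function" warnings:
 *
 *	my_trap();
 *	unreachable();
 */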

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data
 * item.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
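/*
 * Example (illustrative; my_vector_entry is hypothetical): keep an entry
 * point alive even though nothing in C references it by name:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry)
 */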

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
({								\
	unsigned long __ptr;					\
	__ptr = (unsigned long) (ptr);				\
	(typeof(ptr)) (__ptr + (off));				\
})
#endif
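/*
 * Example (illustrative; 'base' and 'off' are hypothetical): the classic
 * user is per-CPU addressing, where a pointer is shifted by an offset the
 * compiler must not reason about:
 *
 *	struct foo *p = RELOC_HIDE(base, off);
 *
 * Unlike plain 'base + off', the compiler cannot assume the result still
 * points into the object that 'base' points to.
 */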

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
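/*
 * Example (illustrative; 'a', 'b' and 'sum' are hypothetical): stop the
 * optimizer from tracking a value, e.g. to keep code constant-time or to
 * defeat unwanted constant folding:
 *
 *	unsigned long sum = a ^ b;
 *	OPTIMIZER_HIDE_VAR(sum);
 *
 * With the compiler-specific definition the value is laundered through an
 * asm so the compiler must treat it as unknown; this generic fallback only
 * inserts a full optimization barrier.
 */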

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
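/*
 * Example (illustrative): on line 42 of a file, __UNIQUE_ID(foo) expands to
 * the identifier __UNIQUE_ID_foo42.  It is only "not quite" unique because
 * two expansions on the same source line still collide.
 */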

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining; attempting to inline it may cause a build
 * failure.  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the stack-allocated
 * variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
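/*
 * Example (illustrative; 'str' is a hypothetical NUL-terminated buffer):
 * word-at-a-time string helpers deliberately read a full word, which may
 * extend past the terminating NUL.  Only the first byte is KASAN-checked
 * above, so the intentional over-read is not reported:
 *
 *	unsigned long w = read_word_at_a_time(str);
 *	(then scan 'w' for a zero byte, strlen-style)
 */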

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (__force typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})
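/*
 * Example usage (illustrative; 'ready' is a hypothetical int shared with an
 * interrupt handler on the same CPU):
 *
 *	WRITE_ONCE(ready, 1);		(writer, process context)
 *
 *	if (READ_ONCE(ready))		(reader, IRQ context)
 *		handle_it();
 *
 * For word-sized types both accesses compile to single volatile loads and
 * stores, so they cannot be torn, merged, or refetched.
 */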

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __attribute__((section(".discard.addressable"), used)) \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
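/*
 * Example (illustrative; my_stub is hypothetical): give a static function a
 * symbol that inline asm elsewhere can reference by name:
 *
 *	static void my_stub(void) { ... }
 *	__ADDRESSABLE(my_stub)
 */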

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
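/*
 * Worked example (illustrative): if table[i] stores the self-relative
 * offset (target - &table[i]), then
 *
 *	offset_to_ptr(&table[i]) == (unsigned long)&table[i] + table[i]
 *	                         == target
 *
 * Storing 32-bit offsets instead of 64-bit pointers halves such tables on
 * 64-bit kernels.
 */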

#endif /* __ASSEMBLY__ */

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		int __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
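/*
 * Example usage (illustrative; struct my_hdr is hypothetical): fail the
 * build if a structure silently changes size, e.g. because of padding:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 16,
 *			   "struct my_hdr must stay 16 bytes");
 */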

#endif /* __LINUX_COMPILER_H */