/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})
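
/*
 * Illustrative sketch (not part of the original header): with the if()
 * macro above, an ordinary statement such as
 *
 *	if (x > 0)
 *		do_thing();
 *
 * expands to roughly
 *
 *	if (__trace_if_var(!!(x > 0)))
 *		do_thing();
 *
 * so every non-constant condition updates the miss_hit[] counters. The
 * variadic (cond, ...) form lets conditions containing a top-level
 * comma still parse as one argument list.
 */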

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
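
/*
 * Illustrative sketch (not part of the original header): likely() and
 * unlikely() annotate the expected truth value of a branch so the
 * compiler can lay out the common case as the fall-through path:
 *
 *	err = do_something();
 *	if (unlikely(err))
 *		return err;
 *
 * The !!(x) in the definitions normalises any scalar to 0 or 1 before
 * it reaches __builtin_expect().
 */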

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version is used, for example, to prevent dead-store elimination
 * on @ptr, where gcc and llvm behave differently with a plain
 * barrier(): gcc is satisfied by it, but llvm needs the variable passed
 * as an explicit input to assume it clobbered. The issue is as follows:
 * while the inline asm might access any memory it wants, the compiler
 * could have fit all of @ptr into registers instead, and since @ptr
 * never escaped from there, it proved that the inline asm wasn't
 * touching any of it. This version works well with both compilers,
 * i.e. we're telling the compiler that the inline asm absolutely may
 * see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
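
/*
 * Illustrative sketch (not part of the original header): the classic
 * use of barrier_data() is scrubbing a secret from a buffer, where a
 * plain memset() could be optimised away as a dead store:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 *
 * This mirrors what memzero_explicit() in lib/string.c does.
 */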

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 *	noinstr void foo()
 *	{
 *		instrumentation_begin();
 *		...
 *		if (cond) {
 *			instrumentation_begin();
 *			...
 *			instrumentation_end();
 *		}
 *		bar();
 *		instrumentation_end();
 *	}
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum along the !cond path, we'll see that the call
 * to bar() happens with a 0 value, even though we meant it to happen with a
 * positive value.
 *
 * To avoid this, make _end() a NOP instruction; this ensures it stays part
 * of the conditional block and does not escape.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while (0)
#define instrumentation_end()		do { } while (0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
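
/*
 * Illustrative sketch (not part of the original header): unreachable()
 * tells the compiler a point can never be executed, silencing
 * missing-return warnings and letting dead paths be dropped:
 *
 *	switch (state) {
 *	case STATE_A: return handle_a();
 *	case STATE_B: return handle_b();
 *	}
 *	unreachable();
 *
 * Reaching unreachable() at run time is undefined behaviour, so it may
 * only follow genuinely exhaustive logic.
 */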

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
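
/*
 * Illustrative sketch (not part of the original header, hypothetical
 * symbol name): a handler only ever reached through a hand-built
 * vector table, never by name, can be kept alive with:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 *
 * The emitted __kentry_my_vector_entry pointer gives the linker a live
 * reference, so section garbage collection cannot discard the handler.
 */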

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
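
/*
 * Illustrative sketch (not part of the original header, hypothetical
 * offset name): RELOC_HIDE() launders a pointer through an integer so
 * the compiler cannot track its provenance and apply object-bounds
 * assumptions to the arithmetic:
 *
 *	struct foo *slot = RELOC_HIDE(base, slot_offset);
 *
 * The per-CPU accessors are a traditional user, offsetting a symbol
 * address far outside the object the compiler believes it points to.
 */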

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
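
/*
 * Illustrative sketch (not part of the original header, hypothetical
 * helper name): the empty asm claims to rewrite @var, so the optimizer
 * must forget everything it knew about the value. Constant-time code
 * relies on this to prevent value-based shortcuts:
 *
 *	static inline u8 diff_byte(u8 a, u8 b)
 *	{
 *		u8 d = a ^ b;
 *
 *		OPTIMIZER_HIDE_VAR(d);
 *		return d;
 *	}
 *
 * Without the barrier, the compiler may prove properties of 'd' and
 * introduce data-dependent branches; crypto_memneq() uses this trick.
 */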

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
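
/*
 * Illustrative sketch (not part of the original header): expanded on
 * line 42 of a file, __UNIQUE_ID(foo) pastes to the identifier
 * __UNIQUE_ID_foo42. It is only "not quite" unique because two
 * expansions with the same prefix on one line still collide; compiler
 * headers that can use __COUNTER__ provide a fully unique version.
 */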

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build
 * failure: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the '__u' union
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
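
/*
 * Illustrative sketch (not part of the original header, hypothetical
 * variable name): a flag shared between task context and an interrupt
 * handler on the same CPU:
 *
 *	WRITE_ONCE(done, 1);		writer: one untorn store
 *
 *	while (!READ_ONCE(done))	reader: refetched every pass,
 *		cpu_relax();		so the load cannot be hoisted
 *
 * With a plain 'while (!done)' the compiler may read 'done' once and
 * spin forever on the stale value.
 */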

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
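
/*
 * Illustrative sketch (not part of the original header): storing
 * 32-bit self-relative offsets keeps tables position independent. If
 * entry[i] holds (target - &entry[i]), then
 *
 *	void *target = offset_to_ptr(&entry[i]);
 *
 * recovers the absolute address by adding the offset back to the
 * location it was stored at.
 */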

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
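
/*
 * Illustrative sketch (not part of the original header): the condition
 * must be a compile-time constant; a false one leaves a call to the
 * undefined, __compiletime_error()-marked function and breaks the
 * build, e.g.:
 *
 *	compiletime_assert(sizeof(long) >= sizeof(void *),
 *			   "pointers must fit in a long");
 *
 * Under __OPTIMIZE__ a true condition makes the call dead code, so the
 * error function is never referenced.
 */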

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
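
/*
 * Illustrative sketch (not part of the original header): ARRAY_SIZE()
 * in <linux/kernel.h> is the canonical user, rejecting pointers at
 * compile time:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 *
 * For a true array, (arr) and &(arr)[0] have different types and
 * __must_be_array() contributes 0; for a pointer the types match and
 * BUILD_BUG_ON_ZERO() breaks the build.
 */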

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */