/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
 */

#ifndef COMPILER_H
#define COMPILER_H

#include <sys/cdefs.h>

/*
 * Macros that should be used instead of using __attribute__ directly to
 * ease portability and make the code easier to read.
 *
 * Some of the defines below are known to sometimes cause conflicts when
 * this file is included from xtest in normal world. It is assumed that
 * the conflicting defines have the same meaning in that environment.
 * Surrounding the troublesome defines with #ifndef should be enough.
 */
#ifndef __deprecated
#define __deprecated __attribute__((deprecated))
#endif
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif
#ifndef __noreturn
#define __noreturn __attribute__((__noreturn__))
#endif
#ifndef __pure
#define __pure __attribute__((pure))
#endif
#ifndef __aligned
#define __aligned(x) __attribute__((aligned(x)))
#endif
#ifndef __printf
#define __printf(a, b) __attribute__((format(printf, a, b)))
#endif
#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __attr_const
#define __attr_const __attribute__((__const__))
#endif
#ifndef __unused
#define __unused __attribute__((unused))
#endif
#define __maybe_unused __attribute__((unused))
#ifndef __used
#define __used __attribute__((__used__))
#endif
#ifndef __must_check
#define __must_check __attribute__((warn_unused_result))
#endif
#ifndef __cold
#define __cold __attribute__((__cold__))
#endif
#ifndef __section
#define __section(x) __attribute__((section(x)))
#endif
#define __data __section(".data")
#define __bss __section(".bss")
#ifdef __clang__
#define __SECTION_FLAGS_RODATA
#else
/*
 * Override sections flags/type generated by the C compiler to make sure they
 * are: "a",%progbits (thus creating an allocatable, non-writeable, non-
 * executable data section).
 * The trailing '//' comments out the flags generated by the compiler.
 * This avoids a harmless warning with GCC.
 */
#define __SECTION_FLAGS_RODATA ",\"a\",%progbits //"
#endif
#define __rodata __section(".rodata" __SECTION_FLAGS_RODATA)
#define __rodata_unpaged __section(".rodata.__unpaged" __SECTION_FLAGS_RODATA)
#ifdef CFG_VIRTUALIZATION
#define __nex_bss __section(".nex_bss")
#define __nex_data __section(".nex_data")
#else /* CFG_VIRTUALIZATION */
#define __nex_bss
#define __nex_data
#endif /* CFG_VIRTUALIZATION */
#define __noprof __attribute__((no_instrument_function))
#define __nostackcheck __attribute__((no_instrument_function))

#define __compiler_bswap64(x) __builtin_bswap64((x))
#define __compiler_bswap32(x) __builtin_bswap32((x))
#define __compiler_bswap16(x) __builtin_bswap16((x))

#define __GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
		       __GNUC_PATCHLEVEL__)

/*
 * GCC >= 5.1 provides the overflow-checking builtins; sparse (__CHECKER__)
 * does not understand them, so fall back to the open-coded versions there.
 */
#if __GCC_VERSION >= 50100 && !defined(__CHECKER__)
#define __HAVE_BUILTIN_OVERFLOW 1
#endif

#ifdef __HAVE_BUILTIN_OVERFLOW
/*
 * Each macro returns non-zero (true) if the operation overflowed or if the
 * mathematically-correct result cannot be represented in *(res), and zero
 * on success with the result stored in *(res).
 */
#define __compiler_add_overflow(a, b, res) \
	__builtin_add_overflow((a), (b), (res))

#define __compiler_sub_overflow(a, b, res) \
	__builtin_sub_overflow((a), (b), (res))

#define __compiler_mul_overflow(a, b, res) \
	__builtin_mul_overflow((a), (b), (res))
#else /*!__HAVE_BUILTIN_OVERFLOW*/

/*
 * Copied/inspired from https://www.fefe.de/intof.html
 */

/*
 * Assigns src to dest, returning 0 on success or 1 if the value of src
 * cannot be represented exactly in the type of dest (magnitude or sign
 * would change).
 */
#define __INTOF_ASSIGN(dest, src) (__extension__({ \
	typeof(src) __intof_x = (src); \
	typeof(dest) __intof_y = __intof_x; \
	(((uintmax_t)__intof_x == (uintmax_t)__intof_y) && \
	 ((__intof_x < 1) == (__intof_y < 1)) ? \
		(void)((dest) = __intof_y) , 0 : 1); \
}))

/*
 * Adds a and b and assigns the sum to c, returning 0 on success or 1 on
 * overflow. The four sign combinations of a and b are handled separately
 * so that the intermediate addition is always performed in a type wide
 * enough (intmax_t/uintmax_t) not to overflow itself.
 */
#define __INTOF_ADD(c, a, b) (__extension__({ \
	typeof(a) __intofa_a = (a); \
	typeof(b) __intofa_b = (b); \
	intmax_t __intofa_a_signed = __intofa_a; \
	uintmax_t __intofa_a_unsigned = __intofa_a; \
	intmax_t __intofa_b_signed = __intofa_b; \
	uintmax_t __intofa_b_unsigned = __intofa_b; \
	\
	__intofa_b < 1 ? \
		__intofa_a < 1 ? \
			((INTMAX_MIN - __intofa_b_signed <= \
			  __intofa_a_signed)) ? \
				__INTOF_ASSIGN((c), __intofa_a_signed + \
						    __intofa_b_signed) : 1 \
		: \
			((__intofa_a_unsigned >= (uintmax_t)-__intofa_b) ? \
				__INTOF_ASSIGN((c), __intofa_a_unsigned + \
						    __intofa_b_signed) \
			: \
				__INTOF_ASSIGN((c), \
					       (intmax_t)(__intofa_a_unsigned + \
							  __intofa_b_signed))) \
	: \
		__intofa_a < 1 ? \
			((__intofa_b_unsigned >= (uintmax_t)-__intofa_a) ? \
				__INTOF_ASSIGN((c), __intofa_a_signed + \
						    __intofa_b_unsigned) \
			: \
				__INTOF_ASSIGN((c), \
					       (intmax_t)(__intofa_a_signed + \
							  __intofa_b_unsigned))) \
	: \
		((UINTMAX_MAX - __intofa_b_unsigned >= \
		  __intofa_a_unsigned) ? \
			__INTOF_ASSIGN((c), __intofa_a_unsigned + \
					    __intofa_b_unsigned) : 1); \
}))

/*
 * Subtracts b from a and assigns the difference to c, returning 0 on
 * success or 1 on overflow. Mirrors __INTOF_ADD(): each sign combination
 * is range-checked before the subtraction is carried out.
 */
#define __INTOF_SUB(c, a, b) (__extension__({ \
	typeof(a) __intofs_a = a; \
	typeof(b) __intofs_b = b; \
	intmax_t __intofs_a_signed = __intofs_a; \
	uintmax_t __intofs_a_unsigned = __intofs_a; \
	intmax_t __intofs_b_signed = __intofs_b; \
	uintmax_t __intofs_b_unsigned = __intofs_b; \
	\
	__intofs_b < 1 ? \
		__intofs_a < 1 ? \
			((INTMAX_MAX + __intofs_b_signed >= \
			  __intofs_a_signed) ? \
				__INTOF_ASSIGN((c), __intofs_a_signed - \
						    __intofs_b_signed) : 1) \
		: \
			(((uintmax_t)(UINTMAX_MAX + __intofs_b_signed) >= \
			  __intofs_a_unsigned) ? \
				__INTOF_ASSIGN((c), __intofs_a - \
						    __intofs_b) : 1) \
	: \
		__intofs_a < 1 ? \
			(((intmax_t)(INTMAX_MIN + __intofs_b) <= \
			  __intofs_a_signed) ? \
				__INTOF_ASSIGN((c), \
					       (intmax_t)(__intofs_a_signed - \
							  __intofs_b_unsigned)) : 1) \
		: \
			((__intofs_b_unsigned <= __intofs_a_unsigned) ? \
				__INTOF_ASSIGN((c), __intofs_a_unsigned - \
						    __intofs_b_unsigned) \
			: \
				__INTOF_ASSIGN((c), \
					       (intmax_t)(__intofs_a_unsigned - \
							  __intofs_b_unsigned))); \
}))

/*
 * Dealing with detecting overflow in multiplication of integers.
 *
 * First step is to remove two corner cases with the minimum signed integer
 * which can't be represented as a positive integer + sign.
 * Multiply with 0 or 1 can't overflow, no checking needed of the operation,
 * only if it can be assigned to the result.
 *
 * After the corner cases are eliminated we convert the two factors to
 * positive unsigned values, keeping track of the original in another
 * variable which is used at the end to determine the sign of the product.
 *
 * The two terms (a and b) are divided into upper and lower half (x1 upper
 * and x0 lower), so the product is:
 * ((a1 << hshift) + a0) * ((b1 << hshift) + b0)
 * which also is:
 * ((a1 * b1) << (hshift * 2)) +                 (T1)
 * ((a1 * b0 + a0 * b1) << hshift) +             (T2)
 * (a0 * b0)                                     (T3)
 *
 * From this we can tell that (a1 * b1) has to be 0 or we'll overflow, that
 * is, at least one of a1 or b1 has to be 0. Once this has been checked the
 * addition: ((a1 * b0) << hshift) + ((a0 * b1) << hshift)
 * isn't an addition as one of the terms will be 0.
 *
 * Since each factor in: (a0 * b0)
 * only uses half the capacity of the underlying type it can't overflow
 *
 * The addition of T2 and T3 can overflow so we use __INTOF_ADD() to
 * perform that addition. If the addition succeeds without overflow the
 * result is assigned the required sign and checked for overflow again.
 *
 * NOTE(review): in the helper macros below the *0 suffix holds the UPPER
 * half (>> hshift) and *1 the LOWER half (& hmask) — the opposite of the
 * x1-upper/x0-lower convention used in the prose above. The algorithm is
 * the same with the roles swapped.
 */

#define __intof_mul_negate ((__intof_oa < 1) != (__intof_ob < 1))
#define __intof_mul_hshift (sizeof(uintmax_t) * 8 / 2)
#define __intof_mul_hmask (UINTMAX_MAX >> __intof_mul_hshift)
#define __intof_mul_a0 ((uintmax_t)(__intof_a) >> __intof_mul_hshift)
#define __intof_mul_b0 ((uintmax_t)(__intof_b) >> __intof_mul_hshift)
#define __intof_mul_a1 ((uintmax_t)(__intof_a) & __intof_mul_hmask)
#define __intof_mul_b1 ((uintmax_t)(__intof_b) & __intof_mul_hmask)
#define __intof_mul_t (__intof_mul_a1 * __intof_mul_b0 + \
		       __intof_mul_a0 * __intof_mul_b1)

/*
 * Multiplies a and b and assigns the product to c, returning 0 on success
 * or 1 on overflow. See the algorithm description above.
 */
#define __INTOF_MUL(c, a, b) (__extension__({ \
	typeof(a) __intof_oa = (a); \
	typeof(a) __intof_a = __intof_oa < 1 ? -__intof_oa : __intof_oa; \
	typeof(b) __intof_ob = (b); \
	typeof(b) __intof_b = __intof_ob < 1 ? -__intof_ob : __intof_ob; \
	typeof(c) __intof_c; \
	\
	__intof_oa == 0 || __intof_ob == 0 || \
	__intof_oa == 1 || __intof_ob == 1 ? \
		__INTOF_ASSIGN((c), __intof_oa * __intof_ob) : \
	(__intof_mul_a0 && __intof_mul_b0) || \
	__intof_mul_t > __intof_mul_hmask ? 1 : \
	__INTOF_ADD((__intof_c), __intof_mul_t << __intof_mul_hshift, \
		    __intof_mul_a1 * __intof_mul_b1) ? 1 : \
	__intof_mul_negate ? __INTOF_ASSIGN((c), -__intof_c) : \
			     __INTOF_ASSIGN((c), __intof_c); \
}))

#define __compiler_add_overflow(a, b, res) __INTOF_ADD(*(res), (a), (b))
#define __compiler_sub_overflow(a, b, res) __INTOF_SUB(*(res), (a), (b))
#define __compiler_mul_overflow(a, b, res) __INTOF_MUL(*(res), (a), (b))

#endif /*!__HAVE_BUILTIN_OVERFLOW*/

/*
 * Atomically compares *(p) with *(oval); on match stores nval in *(p)
 * (acquire ordering), otherwise loads the current value into *(oval)
 * (relaxed ordering). Returns true on a successful exchange.
 */
#define __compiler_compare_and_swap(p, oval, nval) \
	__atomic_compare_exchange_n((p), (oval), (nval), true, \
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) \

#define __compiler_atomic_load(p) __atomic_load_n((p), __ATOMIC_RELAXED)
#define __compiler_atomic_store(p, val) \
	__atomic_store_n((p), (val), __ATOMIC_RELAXED)

#endif /*COMPILER_H*/