/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#ifndef COMPILER_H
#define COMPILER_H

/*
 * Macros that should be used instead of __attribute__ directly, to ease
 * portability and make the code easier to read.
 *
 * Some of the defines below are known to sometimes cause conflicts when
 * this file is included from xtest in normal world. It is assumed that
 * the conflicting defines have the same meaning in that environment.
 * Surrounding the troublesome defines with #ifndef should be enough.
 */
#define __deprecated __attribute__((deprecated))
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#define __weak __attribute__((weak))
#ifndef __noreturn
#define __noreturn __attribute__((__noreturn__))
#endif
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __noinline __attribute__((noinline))
#define __attr_const __attribute__((__const__))
#ifndef __unused
#define __unused __attribute__((unused))
#endif
#define __maybe_unused __attribute__((unused))
#ifndef __used
#define __used __attribute__((__used__))
#endif
#define __must_check __attribute__((warn_unused_result))
#define __cold __attribute__((__cold__))
#define __section(x) __attribute__((section(x)))
#define __data __section(".data")
#define __bss __section(".bss")
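
/*
 * Illustrative usage of the wrappers above (the declarations below are
 * hypothetical and not part of this header):
 *
 *	struct msg_hdr {
 *		uint8_t type;
 *		uint32_t len;
 *	} __packed;
 *
 *	void panic_with_msg(const char *fmt, ...) __noreturn __printf(1, 2);
 *
 *	static int __maybe_unused debug_level;
 */
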
#ifdef __clang__
#define __SECTION_FLAGS_RODATA
#else
/*
 * Override the section flags/type generated by the C compiler to make sure
 * they are: "a",%progbits (thus creating an allocatable, non-writeable,
 * non-executable data section).
 * The trailing '//' comments out the flags generated by the compiler.
 * This avoids a harmless warning with GCC.
 */
#define __SECTION_FLAGS_RODATA ",\"a\",%progbits //"
#endif
#define __rodata __section(".rodata" __SECTION_FLAGS_RODATA)
#define __rodata_unpaged __section(".rodata.__unpaged" __SECTION_FLAGS_RODATA)
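
/*
 * For reference, in a non-Clang build the __rodata macro above expands to
 *
 *	__attribute__((section(".rodata" ",\"a\",%progbits //")))
 *
 * i.e. the wanted flags are smuggled in through the section name, and the
 * trailing '//' comments out whatever flags the compiler appends on its own.
 */
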
#ifdef CFG_VIRTUALIZATION
#define __nex_bss __section(".nex_bss")
#define __nex_data __section(".nex_data")
#else /* CFG_VIRTUALIZATION */
#define __nex_bss
#define __nex_data
#endif /* CFG_VIRTUALIZATION */
#define __noprof __attribute__((no_instrument_function))
#define __nostackcheck __attribute__((no_instrument_function))

#define __compiler_bswap64(x) __builtin_bswap64((x))
#define __compiler_bswap32(x) __builtin_bswap32((x))
#define __compiler_bswap16(x) __builtin_bswap16((x))
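
/*
 * For example, __compiler_bswap32(0x12345678) evaluates to 0x78563412,
 * i.e. the byte order of the 32-bit value is reversed.
 */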

#define __GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
                       __GNUC_PATCHLEVEL__)

#if __GCC_VERSION >= 50100 && !defined(__CHECKER__)
#define __HAVE_BUILTIN_OVERFLOW 1
#endif
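
/*
 * __GCC_VERSION encodes the version as major * 10000 + minor * 100 +
 * patchlevel, so the check above (>= 50100) corresponds to GCC 5.1.0,
 * the first release providing the __builtin_*_overflow() builtins used
 * below.
 */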

#ifdef __HAVE_BUILTIN_OVERFLOW
#define __compiler_add_overflow(a, b, res) \
        __builtin_add_overflow((a), (b), (res))

#define __compiler_sub_overflow(a, b, res) \
        __builtin_sub_overflow((a), (b), (res))

#define __compiler_mul_overflow(a, b, res) \
        __builtin_mul_overflow((a), (b), (res))
#else /*!__HAVE_BUILTIN_OVERFLOW*/

/*
 * Copied/inspired from https://www.fefe.de/intof.html
 */

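/*
 * __INTOF_ASSIGN(dest, src) assigns src to dest and evaluates to 0 when the
 * value survives the conversion unchanged (same magnitude and sign), or
 * evaluates to 1 without assigning anything when it does not.
 */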
#define __INTOF_ASSIGN(dest, src) (__extension__({ \
        typeof(src) __intof_x = (src); \
        typeof(dest) __intof_y = __intof_x; \
        (((uintmax_t)__intof_x == (uintmax_t)__intof_y) && \
         ((__intof_x < 1) == (__intof_y < 1)) ? \
                (void)((dest) = __intof_y) , 0 : 1); \
}))

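/*
 * __INTOF_ADD(c, a, b) computes a + b, where a and b may have different
 * types and signedness, widening through intmax_t/uintmax_t as needed.
 * It evaluates to 0 and assigns the sum to c on success, or to 1 on
 * overflow.
 */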
#define __INTOF_ADD(c, a, b) (__extension__({ \
        typeof(a) __intofa_a = (a); \
        typeof(b) __intofa_b = (b); \
        intmax_t __intofa_a_signed = __intofa_a; \
        uintmax_t __intofa_a_unsigned = __intofa_a; \
        intmax_t __intofa_b_signed = __intofa_b; \
        uintmax_t __intofa_b_unsigned = __intofa_b; \
        \
        __intofa_b < 1 ? \
                __intofa_a < 1 ? \
                        ((INTMAX_MIN - __intofa_b_signed <= \
                          __intofa_a_signed)) ? \
                                __INTOF_ASSIGN((c), __intofa_a_signed + \
                                        __intofa_b_signed) : 1 \
                : \
                        ((__intofa_a_unsigned >= (uintmax_t)-__intofa_b) ? \
                                __INTOF_ASSIGN((c), __intofa_a_unsigned + \
                                        __intofa_b_signed) \
                        : \
                                __INTOF_ASSIGN((c), \
                                        (intmax_t)(__intofa_a_unsigned + \
                                                __intofa_b_signed))) \
        : \
                __intofa_a < 1 ? \
                        ((__intofa_b_unsigned >= (uintmax_t)-__intofa_a) ? \
                                __INTOF_ASSIGN((c), __intofa_a_signed + \
                                        __intofa_b_unsigned) \
                        : \
                                __INTOF_ASSIGN((c), \
                                        (intmax_t)(__intofa_a_signed + \
                                                __intofa_b_unsigned))) \
                : \
                        ((UINTMAX_MAX - __intofa_b_unsigned >= \
                          __intofa_a_unsigned) ? \
                                __INTOF_ASSIGN((c), __intofa_a_unsigned + \
                                        __intofa_b_unsigned) : 1); \
}))

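/*
 * __INTOF_SUB(c, a, b) computes a - b analogously to __INTOF_ADD() above:
 * it evaluates to 0 and assigns the difference to c on success, or to 1 on
 * overflow.
 */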
#define __INTOF_SUB(c, a, b) (__extension__({ \
        typeof(a) __intofs_a = a; \
        typeof(b) __intofs_b = b; \
        intmax_t __intofs_a_signed = __intofs_a; \
        uintmax_t __intofs_a_unsigned = __intofs_a; \
        intmax_t __intofs_b_signed = __intofs_b; \
        uintmax_t __intofs_b_unsigned = __intofs_b; \
        \
        __intofs_b < 1 ? \
                __intofs_a < 1 ? \
                        ((INTMAX_MAX + __intofs_b_signed >= \
                          __intofs_a_signed) ? \
                                __INTOF_ASSIGN((c), __intofs_a_signed - \
                                        __intofs_b_signed) : 1) \
                : \
                        (((uintmax_t)(UINTMAX_MAX + __intofs_b_signed) >= \
                          __intofs_a_unsigned) ? \
                                __INTOF_ASSIGN((c), __intofs_a - \
                                        __intofs_b) : 1) \
        : \
                __intofs_a < 1 ? \
                        (((intmax_t)(INTMAX_MIN + __intofs_b) <= \
                          __intofs_a_signed) ? \
                                __INTOF_ASSIGN((c), \
                                        (intmax_t)(__intofs_a_signed - \
                                                __intofs_b_unsigned)) : 1) \
                : \
                        ((__intofs_b_unsigned <= __intofs_a_unsigned) ? \
                                __INTOF_ASSIGN((c), __intofs_a_unsigned - \
                                        __intofs_b_unsigned) \
                        : \
                                __INTOF_ASSIGN((c), \
                                        (intmax_t)(__intofs_a_unsigned - \
                                                __intofs_b_unsigned))); \
}))

/*
 * Detecting overflow when multiplying integers.
 *
 * The first step is to remove two corner cases involving the minimum
 * signed integer, which can't be represented as a positive integer + sign.
 * Multiplying by 0 or 1 can't overflow, so the operation itself needs no
 * checking, only whether the result can be assigned to the destination.
 *
 * After the corner cases are eliminated we convert the two factors to
 * positive unsigned values, keeping track of the originals in other
 * variables which are used at the end to determine the sign of the product.
 *
 * The two factors (a and b) are divided into an upper and a lower half
 * (x1 upper and x0 lower), so the product is:
 * ((a1 << hshift) + a0) * ((b1 << hshift) + b0)
 * which also is:
 * ((a1 * b1) << (hshift * 2)) +		(T1)
 * ((a1 * b0 + a0 * b1) << hshift) +		(T2)
 * (a0 * b0)					(T3)
 *
 * From this we can tell that (a1 * b1) has to be 0 or we'll overflow, that
 * is, at least one of a1 or b1 has to be 0. Once this has been checked the
 * addition: ((a1 * b0) << hshift) + ((a0 * b1) << hshift)
 * isn't a real addition as one of the terms will be 0.
 *
 * Since each factor in (a0 * b0) only uses half the capacity of the
 * underlying type, it can't overflow.
 *
 * The addition of T2 and T3 can overflow, so we use __INTOF_ADD() to
 * perform that addition. If the addition succeeds without overflow the
 * result is assigned the required sign and checked for overflow again.
 */

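/*
 * Note that the helper macros below use the opposite naming convention
 * from the comment above: __intof_mul_a0/__intof_mul_b0 hold the upper
 * halves and __intof_mul_a1/__intof_mul_b1 the lower halves of the two
 * factors.
 */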
#define __intof_mul_negate ((__intof_oa < 1) != (__intof_ob < 1))
#define __intof_mul_hshift (sizeof(uintmax_t) * 8 / 2)
#define __intof_mul_hmask (UINTMAX_MAX >> __intof_mul_hshift)
#define __intof_mul_a0 ((uintmax_t)(__intof_a) >> __intof_mul_hshift)
#define __intof_mul_b0 ((uintmax_t)(__intof_b) >> __intof_mul_hshift)
#define __intof_mul_a1 ((uintmax_t)(__intof_a) & __intof_mul_hmask)
#define __intof_mul_b1 ((uintmax_t)(__intof_b) & __intof_mul_hmask)
#define __intof_mul_t (__intof_mul_a1 * __intof_mul_b0 + \
                       __intof_mul_a0 * __intof_mul_b1)

#define __INTOF_MUL(c, a, b) (__extension__({ \
        typeof(a) __intof_oa = (a); \
        typeof(a) __intof_a = __intof_oa < 1 ? -__intof_oa : __intof_oa; \
        typeof(b) __intof_ob = (b); \
        typeof(b) __intof_b = __intof_ob < 1 ? -__intof_ob : __intof_ob; \
        typeof(c) __intof_c; \
        \
        __intof_oa == 0 || __intof_ob == 0 || \
        __intof_oa == 1 || __intof_ob == 1 ? \
                __INTOF_ASSIGN((c), __intof_oa * __intof_ob) : \
        (__intof_mul_a0 && __intof_mul_b0) || \
        __intof_mul_t > __intof_mul_hmask ? 1 : \
        __INTOF_ADD((__intof_c), __intof_mul_t << __intof_mul_hshift, \
                    __intof_mul_a1 * __intof_mul_b1) ? 1 : \
        __intof_mul_negate ? __INTOF_ASSIGN((c), -__intof_c) : \
        __INTOF_ASSIGN((c), __intof_c); \
}))

#define __compiler_add_overflow(a, b, res) __INTOF_ADD(*(res), (a), (b))
#define __compiler_sub_overflow(a, b, res) __INTOF_SUB(*(res), (a), (b))
#define __compiler_mul_overflow(a, b, res) __INTOF_MUL(*(res), (a), (b))

#endif /*!__HAVE_BUILTIN_OVERFLOW*/
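
/*
 * Whichever implementation is selected above, the wrappers are used the
 * same way: they evaluate to a non-zero value if the operation overflows
 * the type of *res, and to 0 otherwise, in which case the result has been
 * stored in *res. A minimal usage sketch (the helper below is hypothetical,
 * not part of this header):
 *
 *	static int mul_size(size_t nmemb, size_t size, size_t *bytes)
 *	{
 *		if (__compiler_mul_overflow(nmemb, size, bytes))
 *			return -1;
 *		return 0;
 *	}
 */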

#define __compiler_compare_and_swap(p, oval, nval) \
        __atomic_compare_exchange_n((p), (oval), (nval), true, \
                                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) \

#define __compiler_atomic_load(p) __atomic_load_n((p), __ATOMIC_RELAXED)
#define __compiler_atomic_store(p, val) \
        __atomic_store_n((p), (val), __ATOMIC_RELAXED)
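
/*
 * __compiler_compare_and_swap() follows the __atomic_compare_exchange_n()
 * convention: *oval holds the expected value and is updated with the
 * current value when the exchange fails, and the macro evaluates to true
 * when *p was swapped to nval. Hypothetical usage sketch:
 *
 *	uint32_t expected = 0;
 *
 *	while (!__compiler_compare_and_swap(&lock_word, &expected, 1))
 *		expected = 0;
 */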

#endif /*COMPILER_H*/