Copy utility headers from optee_os

Provides an interface for the following common tasks:
  - defining functions in assembly
  - setting function and variable attributes
  - safe integer operations
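
As an illustration (hypothetical sketch, not taken from the sources), the
overflow-checked helpers from util.h are meant to be used like this:

    size_t total = 0;

    if (ADD_OVERFLOW(offset, length, &total)) {
        /* handle overflow, total is undefined here */
    }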

The files in this commit were forked from optee_os, keeping the
original license header intact:
    tag name: v3.10.0
    tagged commit: d1c635434c55b7d75eadf471bde04926bd1e50a7

From 'lib/libutils/ext/include/' of optee_os to
    'components/common/utils/include/':
        asm.S
        compiler.h
        util.h

Change-Id: I5d83618fa157a4b14f86e522c5def40c74c4ad95
Signed-off-by: Imre Kis <imre.kis@arm.com>
diff --git a/components/common/utils/component.cmake b/components/common/utils/component.cmake
new file mode 100644
index 0000000..363a464
--- /dev/null
+++ b/components/common/utils/component.cmake
@@ -0,0 +1,15 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2020, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+if (NOT DEFINED TGT)
+	message(FATAL_ERROR "mandatory parameter TGT is not defined.")
+endif()
+
+
+target_include_directories(${TGT}
+	 PUBLIC
+		"${CMAKE_CURRENT_LIST_DIR}/include"
+	)
diff --git a/components/common/utils/include/asm.S b/components/common/utils/include/asm.S
new file mode 100644
index 0000000..d3cecc3
--- /dev/null
+++ b/components/common/utils/include/asm.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ */
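+
+/*
+ * Convenience macros for declaring functions and data objects in assembly
+ * files. A minimal usage sketch (illustrative Arm assembly, not part of the
+ * original optee_os file):
+ *
+ *	FUNC my_func , :
+ *		mov	r0, #0
+ *		bx	lr
+ *	END_FUNC my_func
+ *
+ * FUNC and LOCAL_FUNC place the code in a per-function .text.<name> section
+ * unless an explicit section argument is given.
+ */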
+
+	.macro FUNC name colon section=default
+	.ifc	\section\(),default
+	.section .text.\name
+	.else
+	.section \section , "ax" , %progbits
+	.endif
+	.global \name
+	.type \name , %function
+	.balign 4
+	\name \colon
+	.endm
+
+	.macro DATA name colon
+	.global \name
+	.type \name , %object
+	\name \colon
+	.endm
+
+	.macro LOCAL_FUNC name colon section=default
+	.ifc	\section\(),default
+	.section .text.\name
+	.else
+	.section \section , "ax" , %progbits
+	.endif
+	.type \name , %function
+	.balign 4
+	\name \colon
+	.endm
+
+	.macro LOCAL_DATA name colon
+	.type \name , %object
+	\name \colon
+	.endm
+
+	.macro END_DATA name
+	.size \name , .-\name
+	.endm
+
+	.macro END_FUNC name
+	.size \name , .-\name
+	.endm
diff --git a/components/common/utils/include/compiler.h b/components/common/utils/include/compiler.h
new file mode 100644
index 0000000..c363df9
--- /dev/null
+++ b/components/common/utils/include/compiler.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ */
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+/*
+ * Macros that should be used instead of using __attribute__ directly to
+ * ease portability and make the code easier to read.
+ *
+ * Some of the defines below are known to sometimes cause conflicts when
+ * this file is included from xtest in normal world. It is assumed that
+ * the conflicting defines have the same meaning in that environment.
+ * Surrounding the troublesome defines with #ifndef should be enough.
+ */
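+/*
+ * Illustrative usage sketch (hypothetical declarations, not part of the
+ * original optee_os header):
+ *
+ *	struct msg_hdr { uint8_t type; uint32_t length; } __packed;
+ *	__weak void platform_init(void);
+ *	__noreturn void panic(const char *msg);
+ */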
+#define __deprecated	__attribute__((deprecated))
+#ifndef __packed
+#define __packed	__attribute__((packed))
+#endif
+#define __weak		__attribute__((weak))
+#ifndef __noreturn
+#define __noreturn	__attribute__((__noreturn__))
+#endif
+#define __pure		__attribute__((pure))
+#define __aligned(x)	__attribute__((aligned(x)))
+#define __printf(a, b)	__attribute__((format(printf, a, b)))
+#define __noinline	__attribute__((noinline))
+#define __attr_const	__attribute__((__const__))
+#ifndef __unused
+#define __unused	__attribute__((unused))
+#endif
+#define __maybe_unused	__attribute__((unused))
+#ifndef __used
+#define __used		__attribute__((__used__))
+#endif
+#define __must_check	__attribute__((warn_unused_result))
+#define __cold		__attribute__((__cold__))
+#define __section(x)	__attribute__((section(x)))
+#define __data		__section(".data")
+#define __bss		__section(".bss")
+#ifdef __clang__
+#define __SECTION_FLAGS_RODATA
+#else
+/*
+ * Override sections flags/type generated by the C compiler to make sure they
+ * are: "a",%progbits (thus creating an allocatable, non-writeable, non-
+ * executable data section).
+ * The trailing '//' comments out the flags generated by the compiler.
+ * This avoids a harmless warning with GCC.
+ */
+#define __SECTION_FLAGS_RODATA ",\"a\",%progbits //"
+#endif
+#define __rodata	__section(".rodata" __SECTION_FLAGS_RODATA)
+#define __rodata_unpaged __section(".rodata.__unpaged" __SECTION_FLAGS_RODATA)
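+/*
+ * For reference (illustrative): with the non-clang definition above,
+ * __rodata expands to __attribute__((section(".rodata,\"a\",%progbits //")))
+ * so the emitted directive looks like
+ *	.section .rodata,"a",%progbits //<flags appended by the compiler>
+ * and everything after '//' is ignored by the assembler.
+ */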
+#ifdef CFG_VIRTUALIZATION
+#define __nex_bss		__section(".nex_bss")
+#define __nex_data		__section(".nex_data")
+#else  /* CFG_VIRTUALIZATION */
+#define __nex_bss
+#define __nex_data
+#endif	/* CFG_VIRTUALIZATION */
+#define __noprof	__attribute__((no_instrument_function))
+#define __nostackcheck	__attribute__((no_instrument_function))
+
+#define __compiler_bswap64(x)	__builtin_bswap64((x))
+#define __compiler_bswap32(x)	__builtin_bswap32((x))
+#define __compiler_bswap16(x)	__builtin_bswap16((x))
+
+#define __GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
+		       __GNUC_PATCHLEVEL__)
+
+#if __GCC_VERSION >= 50100 && !defined(__CHECKER__)
+#define __HAVE_BUILTIN_OVERFLOW 1
+#endif
+
+#ifdef __HAVE_BUILTIN_OVERFLOW
+#define __compiler_add_overflow(a, b, res) \
+	__builtin_add_overflow((a), (b), (res))
+
+#define __compiler_sub_overflow(a, b, res) \
+	__builtin_sub_overflow((a), (b), (res))
+
+#define __compiler_mul_overflow(a, b, res) \
+	__builtin_mul_overflow((a), (b), (res))
+#else /*!__HAVE_BUILTIN_OVERFLOW*/
+
+/*
+ * Copied/inspired from https://www.fefe.de/intof.html
+ */
+
+#define __INTOF_ASSIGN(dest, src) (__extension__({ \
+	typeof(src) __intof_x = (src); \
+	typeof(dest) __intof_y = __intof_x; \
+	(((uintmax_t)__intof_x == (uintmax_t)__intof_y) && \
+	 ((__intof_x < 1) == (__intof_y < 1)) ? \
+		(void)((dest) = __intof_y) , 0 : 1); \
+}))
+
+#define __INTOF_ADD(c, a, b) (__extension__({ \
+	typeof(a) __intofa_a = (a); \
+	typeof(b) __intofa_b = (b); \
+	intmax_t __intofa_a_signed = __intofa_a; \
+	uintmax_t __intofa_a_unsigned = __intofa_a; \
+	intmax_t __intofa_b_signed = __intofa_b; \
+	uintmax_t __intofa_b_unsigned = __intofa_b; \
+	\
+	__intofa_b < 1 ? \
+		__intofa_a < 1 ? \
+			((INTMAX_MIN - __intofa_b_signed <= \
+			  __intofa_a_signed)) ? \
+				__INTOF_ASSIGN((c), __intofa_a_signed + \
+						    __intofa_b_signed) : 1 \
+		: \
+			((__intofa_a_unsigned >= (uintmax_t)-__intofa_b) ? \
+				__INTOF_ASSIGN((c), __intofa_a_unsigned + \
+						    __intofa_b_signed) \
+			: \
+				__INTOF_ASSIGN((c), \
+					(intmax_t)(__intofa_a_unsigned + \
+						   __intofa_b_signed))) \
+	: \
+		__intofa_a < 1 ? \
+			((__intofa_b_unsigned >= (uintmax_t)-__intofa_a) ? \
+				__INTOF_ASSIGN((c), __intofa_a_signed + \
+						    __intofa_b_unsigned) \
+			: \
+				__INTOF_ASSIGN((c), \
+					(intmax_t)(__intofa_a_signed + \
+						   __intofa_b_unsigned))) \
+		: \
+			((UINTMAX_MAX - __intofa_b_unsigned >= \
+			  __intofa_a_unsigned) ? \
+				__INTOF_ASSIGN((c), __intofa_a_unsigned + \
+						    __intofa_b_unsigned) : 1); \
+}))
+
+#define __INTOF_SUB(c, a, b) (__extension__({ \
+	typeof(a) __intofs_a = a; \
+	typeof(b) __intofs_b = b; \
+	intmax_t __intofs_a_signed = __intofs_a; \
+	uintmax_t __intofs_a_unsigned = __intofs_a; \
+	intmax_t __intofs_b_signed = __intofs_b; \
+	uintmax_t __intofs_b_unsigned = __intofs_b; \
+	\
+	__intofs_b < 1 ? \
+		__intofs_a < 1 ? \
+			((INTMAX_MAX + __intofs_b_signed >= \
+			  __intofs_a_signed) ? \
+				__INTOF_ASSIGN((c), __intofs_a_signed - \
+						    __intofs_b_signed) : 1) \
+		: \
+			(((uintmax_t)(UINTMAX_MAX + __intofs_b_signed) >= \
+			  __intofs_a_unsigned) ? \
+				__INTOF_ASSIGN((c), __intofs_a - \
+						    __intofs_b) : 1) \
+	: \
+		__intofs_a < 1 ? \
+			(((intmax_t)(INTMAX_MIN + __intofs_b) <= \
+			  __intofs_a_signed) ? \
+				__INTOF_ASSIGN((c), \
+					(intmax_t)(__intofs_a_signed - \
+						   __intofs_b_unsigned)) : 1) \
+		: \
+			((__intofs_b_unsigned <= __intofs_a_unsigned) ? \
+				__INTOF_ASSIGN((c), __intofs_a_unsigned - \
+						    __intofs_b_unsigned) \
+			: \
+				__INTOF_ASSIGN((c), \
+					(intmax_t)(__intofs_a_unsigned - \
+						   __intofs_b_unsigned))); \
+}))
+
+/*
+ * Dealing with detecting overflow in multiplication of integers.
+ *
+ * The first step is to remove two corner cases with the minimum signed
+ * integer, which can't be represented as a positive integer + sign.
+ * Multiplying by 0 or 1 can't overflow, so the operation itself needs no
+ * checking; only the assignment to the result has to be checked.
+ *
+ * After the corner cases are eliminated we convert the two factors to
+ * positive unsigned values, keeping track of the original in another
+ * variable which is used at the end to determine the sign of the product.
+ *
+ * The two factors (a and b) are divided into upper and lower halves (x1 upper
+ * and x0 lower), so the product is:
+ * ((a1 << hshift) + a0) * ((b1 << hshift) + b0)
+ * which also is:
+ * ((a1 * b1) << (hshift * 2)) +				(T1)
+ * ((a1 * b0 + a0 * b1) << hshift) +				(T2)
+ * (a0 * b0)							(T3)
+ *
+ * From this we can tell that (a1 * b1) has to be 0 or we'll overflow, that
+ * is, at least one of a1 or b1 has to be 0. Once this has been checked the
+ * addition ((a1 * b0) << hshift) + ((a0 * b1) << hshift)
+ * isn't a real addition, as one of the terms will be 0.
+ *
+ * Since each factor in (a0 * b0)
+ * only uses half the capacity of the underlying type, it can't overflow.
+ *
+ * The addition of T2 and T3 can overflow so we use __INTOF_ADD() to
+ * perform that addition. If the addition succeeds without overflow the
+ * result is assigned the required sign and checked for overflow again.
+ */
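+
+/*
+ * Worked example in the notation above (illustrative, using a toy 8-bit
+ * type with hshift = 4): a = 0x23 gives a1 = 0x2 and a0 = 0x3, b = 0x15
+ * gives b1 = 0x1 and b0 = 0x5. Then T1 = 0x200, T2 = (0xa + 0x3) << 4 =
+ * 0xd0 and T3 = 0xf, which sum to 0x2df = 0x23 * 0x15. Both a1 and b1 are
+ * non-zero, so T1 is non-zero and the product cannot fit in the toy type,
+ * i.e. the overflow is detected.
+ */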
+
+#define __intof_mul_negate	((__intof_oa < 1) != (__intof_ob < 1))
+#define __intof_mul_hshift	(sizeof(uintmax_t) * 8 / 2)
+#define __intof_mul_hmask	(UINTMAX_MAX >> __intof_mul_hshift)
+#define __intof_mul_a0		((uintmax_t)(__intof_a) >> __intof_mul_hshift)
+#define __intof_mul_b0		((uintmax_t)(__intof_b) >> __intof_mul_hshift)
+#define __intof_mul_a1		((uintmax_t)(__intof_a) & __intof_mul_hmask)
+#define __intof_mul_b1		((uintmax_t)(__intof_b) & __intof_mul_hmask)
+#define __intof_mul_t		(__intof_mul_a1 * __intof_mul_b0 + \
+				 __intof_mul_a0 * __intof_mul_b1)
+
+#define __INTOF_MUL(c, a, b) (__extension__({ \
+	typeof(a) __intof_oa = (a); \
+	typeof(a) __intof_a = __intof_oa < 1 ? -__intof_oa : __intof_oa; \
+	typeof(b) __intof_ob = (b); \
+	typeof(b) __intof_b = __intof_ob < 1 ? -__intof_ob : __intof_ob; \
+	typeof(c) __intof_c; \
+	\
+	__intof_oa == 0 || __intof_ob == 0 || \
+	__intof_oa == 1 || __intof_ob == 1 ? \
+		__INTOF_ASSIGN((c), __intof_oa * __intof_ob) : \
+	(__intof_mul_a0 && __intof_mul_b0) || \
+	 __intof_mul_t > __intof_mul_hmask ?  1 : \
+	__INTOF_ADD((__intof_c), __intof_mul_t << __intof_mul_hshift, \
+				 __intof_mul_a1 * __intof_mul_b1) ? 1 : \
+	__intof_mul_negate ? __INTOF_ASSIGN((c), -__intof_c) : \
+			     __INTOF_ASSIGN((c), __intof_c); \
+}))
+
+#define __compiler_add_overflow(a, b, res) __INTOF_ADD(*(res), (a), (b))
+#define __compiler_sub_overflow(a, b, res) __INTOF_SUB(*(res), (a), (b))
+#define __compiler_mul_overflow(a, b, res) __INTOF_MUL(*(res), (a), (b))
+
+#endif /*!__HAVE_BUILTIN_OVERFLOW*/
+
+#define __compiler_compare_and_swap(p, oval, nval) \
+	__atomic_compare_exchange_n((p), (oval), (nval), true, \
+				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) \
+
+#define __compiler_atomic_load(p) __atomic_load_n((p), __ATOMIC_RELAXED)
+#define __compiler_atomic_store(p, val) \
+	__atomic_store_n((p), (val), __ATOMIC_RELAXED)
+
+#endif /*COMPILER_H*/
diff --git a/components/common/utils/include/util.h b/components/common/utils/include/util.h
new file mode 100644
index 0000000..03ab476
--- /dev/null
+++ b/components/common/utils/include/util.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ */
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <compiler.h>
+#include <inttypes.h>
+
+#define SIZE_4K	UINTPTR_C(0x1000)
+#define SIZE_1M	UINTPTR_C(0x100000)
+#define SIZE_2M	UINTPTR_C(0x200000)
+#define SIZE_4M	UINTPTR_C(0x400000)
+#define SIZE_8M	UINTPTR_C(0x800000)
+#define SIZE_2G	UINTPTR_C(0x80000000)
+
+#ifndef MAX
+#ifndef __ASSEMBLER__
+#define MAX(a, b) \
+	(__extension__({ __typeof__(a) _a = (a); \
+	   __typeof__(b) _b = (b); \
+	 _a > _b ? _a : _b; }))
+
+#define MIN(a, b) \
+	(__extension__({ __typeof__(a) _a = (a); \
+	   __typeof__(b) _b = (b); \
+	 _a < _b ? _a : _b; }))
+#else
+#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
+#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
+#endif
+#endif
+
+/*
+ * In some particular conditions the MAX and MIN macros above fail to
+ * build from C source files, typically where a constant expression is
+ * required. In such cases use MAX_UNSAFE/MIN_UNSAFE instead (see the
+ * example below), which may evaluate their arguments more than once.
+ */
+#define MAX_UNSAFE(a, b)	(((a) > (b)) ? (a) : (b))
+#define MIN_UNSAFE(a, b)	(((a) < (b)) ? (a) : (b))
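+/*
+ * For instance (illustrative, BUF_A_SIZE and BUF_B_SIZE being hypothetical
+ * constants), MAX_UNSAFE() works where a constant expression is required:
+ *	static uint8_t buf[MAX_UNSAFE(BUF_A_SIZE, BUF_B_SIZE)];
+ */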
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#ifndef __ASSEMBLER__
+/* Round v up to the nearest multiple of size; size has to be a power of 2 */
+#define ROUNDUP(v, size) (((v) + ((__typeof__(v))(size) - 1)) & \
+			  ~((__typeof__(v))(size) - 1))
+
+#define ROUNDUP_OVERFLOW(v, size, res) (__extension__({ \
+	typeof(*(res)) __roundup_tmp = 0; \
+	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
+	\
+	ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
+		(void)(*(res) = __roundup_tmp & ~__roundup_mask), 0; \
+}))
+
+/*
+ * Rounds up to the nearest multiple of y and then divides by y. Safe
+ * against overflow; y has to be a power of 2.
+ *
+ * This macro is intended to be used to convert from "number of bytes" to
+ * "number of pages" or similar units. Example:
+ * num_pages = ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE);
+ */
+#define ROUNDUP_DIV(x, y) (__extension__({ \
+	typeof(x) __roundup_x = (x); \
+	typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
+	\
+	(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
+}))
+
+/* Round v down to the nearest multiple of size; size has to be a power of 2 */
+#define ROUNDDOWN(v, size) ((v) & ~((__typeof__(v))(size) - 1))
+
+/* Unsigned integer division with nearest rounding variant */
+#define UDIV_ROUND_NEAREST(x, y) \
+	(__extension__ ({ __typeof__(x) _x = (x); \
+	  __typeof__(y) _y = (y); \
+	  (_x + (_y / 2)) / _y; }))
+#else
+#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))
+#define ROUNDDOWN(x, y)		(((x) / (y)) * (y))
+#define UDIV_ROUND_NEAREST(x, y)	(((x) + ((y) / 2)) / (y))
+#endif
+
+/* x has to be of an unsigned type */
+#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))
+
+#define ALIGNMENT_IS_OK(p, type) \
+	(((uintptr_t)(p) & (__alignof__(type) - 1)) == 0)
+
+#define TO_STR(x) _TO_STR(x)
+#define _TO_STR(x) #x
+
+#define CONCAT(x, y) _CONCAT(x, y)
+#define _CONCAT(x, y) x##y
+
+#define container_of(ptr, type, member) \
+	(__extension__({ \
+		const typeof(((type *)0)->member) *__ptr = (ptr); \
+		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
+	}))
+
+#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+
+#ifdef __ASSEMBLER__
+#define BIT32(nr)		(1 << (nr))
+#define BIT64(nr)		(1 << (nr))
+#define SHIFT_U32(v, shift)	((v) << (shift))
+#define SHIFT_U64(v, shift)	((v) << (shift))
+#else
+#define BIT32(nr)		(UINT32_C(1) << (nr))
+#define BIT64(nr)		(UINT64_C(1) << (nr))
+#define SHIFT_U32(v, shift)	((uint32_t)(v) << (shift))
+#define SHIFT_U64(v, shift)	((uint64_t)(v) << (shift))
+#endif
+#define BIT(nr)			BIT32(nr)
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK_32(h, l) \
+	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
+
+/*
+ * Checking overflow for addition, subtraction and multiplication. Result
+ * of operation is stored in res which is a pointer to some kind of
+ * integer.
+ *
+ * The macros return true if an overflow occurred, in which case *res is
+ * undefined.
+ */
+#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
+#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
+#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))
+
+/* Return a signed +1, 0 or -1 value based on data comparison */
+#define CMP_TRILEAN(a, b) \
+	(__extension__({ \
+		__typeof__(a) _a = (a); \
+		__typeof__(b) _b = (b); \
+		\
+		_a > _b ? 1 : _a < _b ? -1 : 0; \
+	}))
+
+#ifndef __ASSEMBLER__
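+/*
+ * Combine two 32-bit register values into a 64-bit value and split it back;
+ * reg0 holds the high 32 bits and reg1 the low 32 bits.
+ */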
+static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
+{
+	return (uint64_t)reg0 << 32 | reg1;
+}
+
+static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
+				    uint32_t *reg1)
+{
+	*reg0 = val >> 32;
+	*reg1 = val;
+}
+#endif
+
+#endif /*UTIL_H*/